Merge "mmc: core: Determine correct access mode for eMMC cards by reading OCR" into msm-4.9
diff --git a/Documentation/crypto/msm/msm_ice_driver.txt b/Documentation/crypto/msm/msm_ice_driver.txt
new file mode 100644
index 0000000..ddb8176
--- /dev/null
+++ b/Documentation/crypto/msm/msm_ice_driver.txt
@@ -0,0 +1,235 @@
+Introduction:
+=============
+Storage encryption has long been one of the most requested features from a
+security point of view. The QTI-based storage encryption solution uses a
+general purpose crypto engine. While this kind of solution provides a decent
+amount of performance, it falls short as storage speeds keep improving
+significantly. To overcome this performance gap, newer chips embed an Inline
+Crypto Engine (ICE) into the storage device. ICE is designed to meet the line
+speed of storage devices.
+
+Hardware Description
+====================
+ICE is a HW block embedded into a storage device such as UFS/eMMC. By
+default, ICE works in bypass mode, i.e. the ICE HW does not perform any crypto
+operation on data processed by the storage device. If required, ICE can be
+configured to perform crypto operations in one direction (i.e. either
+encryption or decryption) or in both directions (both encryption & decryption).
+
+When a switch between operation modes (plain to crypto or crypto to plain)
+is desired for a particular partition, SW must complete all transactions for
+that particular partition before switching the crypto mode, i.e. no crypto,
+one-direction crypto or both-direction crypto operation. Requests for other
+partitions are not impacted by the crypto mode switch.
+
+ICE HW currently supports the AES 128/256-bit ECB & XTS mode encryption
+algorithms.
+
+Keys for crypto operations are loaded by SW. Keys are stored in a lookup
+table (LUT) located inside the ICE HW. A maximum of 32 keys can be loaded in
+the ICE key LUT. A key inside the LUT can be referenced using a key index.
+
+SW Description
+==============
+ICE HW categorizes ICE registers into 2 groups: those which can be accessed
+only by the secure side, i.e. TZ, and those which can also be accessed by the
+non-secure side, such as HLOS. This requires the ICE driver to be split in two
+pieces: one running in TZ space and another in HLOS space.
+
+The ICE driver in TZ configures keys as requested by the HLOS side.
+
+The ICE driver on the HLOS side is responsible for initialization of the ICE HW.
+
+SW Architecture Diagram
+=======================
+The following are all the components involved in the ICE driver control path:
+
++++++++++++++++++++++++++++++++++++++++++
++               App layer               +
++++++++++++++++++++++++++++++++++++++++++
++             System layer              +
++   ++++++++         +++++++            +
++   + VOLD +         + PFM +            +
++   ++++++++         +++++++            +
++         ||         ||                 +
++         ||         ||                 +
++         \/         \/                 +
++        ++++++++++++++                 +
++        + LibQSEECom +                 +
++        ++++++++++++++                 +
++++++++++++++++++++++++++++++++++++++++++
++             Kernel                    +       +++++++++++++++++
++                                       +       +     KMS       +
++  +++++++  +++++++++++  +++++++++++    +       +++++++++++++++++
++  + ICE +  + Storage +  + QSEECom +    +       +   ICE Driver  +
++++++++++++++++++++++++++++++++++++++++++ <===> +++++++++++++++++
+               ||                                    ||
+               ||                                    ||
+               \/                                    \/
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++                      Storage Device                           +
++                      ++++++++++++++                           +
++                      +   ICE HW   +                           +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Use Cases:
+----------
+a) Device bootup
+The ICE HW is detected during bootup and the corresponding probe function is
+called. The ICE driver parses its data from the device tree node. ICE HW and
+storage HW are tightly coupled: storage device probing depends on ICE device
+probing. The ICE driver configures all the required registers to put the ICE
+HW in bypass mode.
+
+b) Configuring keys
+Currently, there are a couple of use cases for configuring the keys.
+
+1) Full Disk Encryption (FDE)
+The system layer (VOLD), at the invocation of the apps layer, calls libqseecom
+to create the encryption key. Libqseecom calls the qseecom driver to
+communicate with the KMS module on the secure side, i.e. TZ. KMS calls the
+ICE driver on the TZ side to create and set the keys in the ICE HW. At the end
+of the transaction, VOLD has the key index of the key LUT where the encryption
+key is present.
+
+2) Per File Encryption (PFE)
+The Per File Manager (PFM) calls the QSEECom API to create the key. PFM has a
+peer component (PFT) at the kernel layer which gets the corresponding key
+index from PFM.
+
+The following are all the components involved in the ICE driver data path:
+
++++++++++++++++++++++++++++++++++++++++++
++               App layer               +
++++++++++++++++++++++++++++++++++++++++++
++              VFS                      +
++---------------------------------------+
++         File System (EXT4)            +
++---------------------------------------+
++             Block Layer               +
++ --------------------------------------+
++                              +++++++  +
++              dm-req-crypt => + PFT +  +
++                              +++++++  +
++                                       +
++---------------------------------------+
++    +++++++++++           +++++++      +
++    + Storage +           + ICE +      +
++++++++++++++++++++++++++++++++++++++++++
++                  ||                   +
++                  || (Storage Req with +
++                  \/  ICE parameters ) +
++++++++++++++++++++++++++++++++++++++++++
++          Storage Device               +
++          ++++++++++++++               +
++          +   ICE HW   +               +
++++++++++++++++++++++++++++++++++++++++++
+
+c) Data transaction
+Once the crypto key has been configured, VOLD/PFM creates a device mapping for
+the data partition. As part of device mapping, VOLD passes the key index,
+crypto algorithm, mode and key length to the dm layer. In the case of PFE,
+keys are provided by PFT as and when a request is processed by dm-req-crypt.
+When an application needs to read/write data, the request goes through the DM
+layer, which adds the crypto information, provided by VOLD/PFT, to the
+Request. For each Request, the storage driver asks the ICE driver to configure
+the crypto part of the request. The ICE driver extracts the crypto data from
+the Request structure and provides it to the storage driver, which finally
+dispatches the request to the storage device.
+
+d) Error Handling
+Due to issue #1 mentioned in "Known Issues", the ICE driver does not register
+for any interrupt. However, it enables the sources of interrupt for the ICE
+HW. After each data transaction, the storage driver receives a transaction
+completion event. As part of event handling, the storage driver calls the ICE
+driver to check if any ICE interrupt status is set. If yes, the storage driver
+returns an error to the upper layer.
+
+Error handling will be changed in future chips.
+
+Interfaces
+==========
+The ICE driver exposes interfaces for the storage driver to:
+1. Get the global instance of ICE driver
+2. Get the implemented interfaces of the particular ice instance
+3. Initialize the ICE HW
+4. Reset the ICE HW
+5. Resume/Suspend the ICE HW
+6. Get the Crypto configuration for the data request for storage
+7. Check if current data transaction has generated any interrupt
+
+Driver Parameters
+=================
+This driver is built and statically linked into the kernel; therefore,
+there are no module parameters supported by this driver.
+
+There are no kernel command line parameters supported by this driver.
+
+Power Management
+================
+The ICE driver does not do power management on its own, as it is part of the
+storage hardware. Whenever the storage driver receives a request for power
+collapse/suspend or resume, it calls the ICE driver, which exposes APIs for
+the storage HW. The ICE HW, during power collapse or reset, wipes its crypto
+configuration data. When the ICE driver receives a request to resume, it asks
+the ICE driver on the TZ side to restore the configuration. The ICE driver
+does not do anything as part of a power collapse or suspend event.
+
+Interface:
+==========
+The ICE driver exposes the following APIs for the storage driver to use:
+
+int (*init)(struct platform_device *, void *, ice_success_cb, ice_error_cb);
+	-- This function is invoked by the storage controller during its
+	initialization. The storage controller provides success and error
+	callbacks which are invoked asynchronously once ICE HW init is done.
+
+int (*reset)(struct platform_device *);
+	-- ICE HW reset as part of storage controller reset. When the storage
+	controller receives a reset command, it calls reset on the ICE HW. As of
+	now, the ICE HW does not need to do anything as part of reset.
+
+int (*resume)(struct platform_device *);
+	-- The ICE HW, while going into reset, wipes all crypto keys and other
+	data from the ICE HW. The ICE driver reconfigures that data as part of
+	the resume operation.
+
+int (*suspend)(struct platform_device *);
+	-- This API is called by the storage driver when the storage device is
+	going into suspend mode. As of today, the ICE driver does not do
+	anything to handle suspend.
+
+int (*config)(struct platform_device *, struct request *, struct ice_data_setting *);
+	-- The storage driver calls this interface to get all the crypto data
+	required to perform a crypto operation.
+
+int (*status)(struct platform_device *);
+	-- The storage driver calls this interface to check if the previous
+	data transfer generated any error.
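+
+Put together, a storage driver drives these ops roughly as sketched below.
+This is a minimal illustration, not code from this tree: the ice_ops
+aggregate and the function around it are hypothetical; only the op
+signatures above come from the driver.
+
+	/* hypothetical aggregate of the ops listed above */
+	struct ice_ops {
+		int (*init)(struct platform_device *, void *,
+			    ice_success_cb, ice_error_cb);
+		int (*reset)(struct platform_device *);
+		int (*resume)(struct platform_device *);
+		int (*suspend)(struct platform_device *);
+		int (*config)(struct platform_device *, struct request *,
+			      struct ice_data_setting *);
+		int (*status)(struct platform_device *);
+	};
+
+	static int storage_issue_request(struct ice_ops *ops,
+					 struct platform_device *ice_pdev,
+					 struct request *req)
+	{
+		struct ice_data_setting setting;
+		int ret;
+
+		/* ask ICE for the crypto parameters of this request */
+		ret = ops->config(ice_pdev, req, &setting);
+		if (ret)
+			return ret;
+
+		/* ... dispatch req with the ICE parameters to the device ... */
+
+		/* per issue #1, poll ICE for errors after the completion event */
+		return ops->status(ice_pdev);
+	}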
+
+Config options
+==============
+This driver is enabled by the kernel config option CONFIG_CRYPTO_DEV_MSM_ICE.
+
+Dependencies
+============
+The ICE driver depends upon the corresponding ICE driver on the TZ side to
+function appropriately.
+
+Known Issues
+============
+1. ICE HW emits 0s even if it has generated an interrupt
+This issue has a significant impact on how ICE interrupts are handled.
+Currently, the ICE driver does not register for any of the ICE interrupts but
+enables the sources of interrupt. Once the storage driver asks to check the
+status of interrupts, the ICE driver reads and clears the interrupt status and
+provides the read status to the storage driver. This mechanism, though not
+optimal, prevents filesystem corruption.
+This issue has been fixed in newer chips.
+
+2. ICE HW wipes all crypto data during power collapse
+This issue necessitates that the ICE driver on the TZ side store the crypto
+material, which is not required in the case of a general purpose crypto
+engine.
+This issue has been fixed in newer chips.
+
+Further Improvements
+====================
+Currently, due to the PFE use case, the ICE driver is dependent upon
+dm-req-crypt to provide the keys as part of the request structure. This
+couples the ICE driver with the dm-req-crypt based solution. It is under
+discussion to expose IOCTL based and registration based interface APIs from
+the ICE driver. The ICE driver would use these two interfaces to find out if
+any key exists for the current request. If yes, it would choose the right key
+index received from the IOCTL or registration based APIs. If not, it would not
+set any crypto parameters in the request.
diff --git a/Documentation/crypto/msm/qce.txt b/Documentation/crypto/msm/qce.txt
new file mode 100644
index 0000000..9f1b313b
--- /dev/null
+++ b/Documentation/crypto/msm/qce.txt
@@ -0,0 +1,228 @@
+Introduction:
+=============
+
+The QTI crypto engine (qce) driver is a module that
+provides common services for accessing the QTI crypto device.
+Currently, the two main clients of qce are
+-qcrypto driver (module provided for accessing CE HW by kernel space apps)
+-qcedev driver (module provided for accessing CE HW by user space apps)
+
+
+The crypto engine (qce) driver is a client to the DMA driver for the QTI
+DMA device - Application Data Mover (ADM). ADM is used to provide the DMA
+transfer capability between QTI crypto device hardware and DDR memory
+for crypto operations.
+
+  Figure 1.
+  ---------
+
+  Linux kernel
+  (ex:IPSec)<-----*  QTI crypto driver----+
+			(qcrypto)	  |
+		   (for kernel space app) |
+					  |
+					  +-->|
+					      |
+					      | *qce   <----> QTI
+					      | driver        ADM driver <---> ADM HW
+					  +-->|			|		|
+					  |			|		|
+					  |			|		|
+					  |			|		|
+   Linux kernel				  |			|		|
+   misc device  <--- *QCEDEV Driver-------+			|		|
+   interface             (qcedev) 			(Reg interface)	 (DMA interface)
+			(for user space app)			\		/
+								 \	       /
+								  \	      /
+								   \	     /
+								    \	    /
+								     \	   /
+								      \	  /
+								QTI crypto CE3 HW
+
+
+ The entities marked with (*) in the Figure 1, are the software components of
+ the Linux QTI crypto modules.
+
+===============
+IMPORTANT NOTE:
+===============
+(1) The CE hardware can be accessed either from user space OR kernel space
+    at one time. Both user space and kernel space clients cannot access the
+    qce driver (and the CE hardware) at the same time.
+	- If your device has user space apps that need to access the crypto
+	  hardware, make sure to have the qcrypto module disabled/unloaded.
+	  This will result in the kernel space apps using the registered
+	  software implementation of the crypto algorithms.
+	- If your device has kernel space apps that need to access the
+	  crypto hardware, make sure to have the qcedev module disabled/unloaded
+	  and implement your user space application to use the software
+	  implementation (ex: openssl/crypto) of the crypto algorithms.
+
+(2) If your device has the Playready (Windows Media DRM) application enabled
+    and uses the qcedev module to access the crypto hardware accelerator,
+    please be informed that, for performance reasons, the CE hardware will need
+    to be dedicated to the Playready application. Any other user space
+    application should be implemented to use the SW implementation
+    (ex: openssl/crypto) of the crypto algorithms.
+
+
+Hardware description:
+=====================
+
+The QTI Crypto HW device family provides a series of algorithms implemented
+in the device hardware.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 HW provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm, and Over The Air (OTA) f8/f9 algorithms as
+defined by the 3GPP forum.
+
+
+Software description
+====================
+
+The crypto device is defined as a platform device. The driver is
+independent of the platform. The driver supports multiple instances of
+crypto HW.
+All the platform specific parameters are defined in the board init
+file, eg. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+The qce driver provides the common services of HW crypto
+access to the two drivers listed above (qcedev, qcrypto). It sets up
+the crypto HW device for the operation, then it requests the ADM driver for
+the DMA of the crypto operation.
+
+Two ADM channels and two command lists (one command list for each
+channel) are involved in an operation.
+
+The setting up of the command lists and the procedure of the operation
+of the crypto device are described in the following sections.
+
+The command list for the first DMA channel is set up as follows:
+
+  The 1st command of the list is for the DMA transfer from DDR memory to the
+  crypto device, to input data to the crypto device. The dst crci of the
+  command is set to crci-in for this crypto device.
+
+  The 2nd command is for the DMA transfer from the crypto device to DDR memory
+  for the authentication result. The src crci is set to crci-hash-done of the
+  crypto device. If authentication is not required in the operation,
+  the 2nd command is not used.
+
+The command list for the second DMA channel is set up as follows:
+
+  One command to DMA data from the crypto device to DDR memory, for the
+  encryption or decryption output of the crypto device.
+
+To accomplish concurrent ciphering and authentication operations, the driver
+performs the following steps:
+    (a). set up the HW crypto device,
+    (b). hit the crypto go register,
+    (c). issue the DMA command of the first channel to the ADM driver,
+    (d). issue the DMA command of the 2nd channel to the ADM driver.
+
+SHA1/SHA256 is an authentication/integrity hash algorithm. To accomplish a
+hash operation (or any authentication-only algorithm), the 2nd DMA channel is
+not required. Only steps (a) to (c) are performed.
+
+At the completion of the DMA operation (for (c) and (d)), the ADM driver
+invokes the callback registered to the DMA driver. This signifies the end of
+the DMA operation(s). The driver reads the status and other information from
+the CE hardware register and then invokes the callback to the qce driver
+client. This signals the completion and the results of the DMA, along with
+the status of the CE hardware, to the qce driver client. This completes a
+crypto operation.
+
+In the qce driver initialization, memory for the two command lists and the
+descriptor lists for each crypto device is allocated out of coherent memory,
+using the Linux DMA API. The driver pre-configures most of the two ADM command
+lists at initialization, so during each crypto operation only minimal setup is
+required. The src_dscr and/or dst_dscr descriptor lists of the ADM command are
+populated from the information obtained from the corresponding data structure,
+e.g. for an AEAD request, the following data structure provides the
+information:
+
+    struct aead_request *req
+      ......
+    req->assoc
+    req->src
+    req->dst
+
+The DMA address of a scatter list will be retrieved and set up in the
+descriptor list of an ADM command.
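+
+As a rough illustration of that step, the standard Linux DMA API is what
+retrieves those addresses. The sketch below uses only stock kernel calls
+(dma_map_sg(), sg_dma_address()); the surrounding function is hypothetical:
+
+	#include <linux/dma-mapping.h>
+	#include <linux/scatterlist.h>
+
+	static int map_sg_for_adm(struct device *dev, struct scatterlist *sgl,
+				  int nents)
+	{
+		struct scatterlist *sg;
+		int i, mapped;
+
+		mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
+		if (!mapped)
+			return -ENOMEM;
+
+		for_each_sg(sgl, sg, mapped, i) {
+			/* sg_dma_address(sg)/sg_dma_len(sg) are what would
+			 * be written into the src_dscr/dst_dscr entries of
+			 * the ADM command */
+		}
+		return 0;
+	}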
+
+Power Management
+================
+  none
+
+
+Interface:
+==========
+
+The interface is defined in qce.h
+
+The qcrypto and qcedev drivers are the clients using
+these interfaces.
+
+The following services are provided by the qce driver -
+
+     qce_open(), qce_close(), qce_ablk_cipher_req(),
+     qce_hw_support(), qce_process_sha_req()
+
+  qce_open() is the first request from the client, e.g. the QTI crypto
+  driver (qcedev, qcrypto), to open a crypto engine. It is normally
+  called from the probe function of the client for a device. During the
+  probe,
+  - the ADM command list structure will be set up,
+  - the crypto device will be initialized,
+  - resources associated with the crypto engine are retrieved by doing
+    platform_get_resource() or platform_get_resource_byname().
+
+ The resources for a device are
+    - crci-in, crci-out, crci-hash-done
+    - two DMA channel IDs, one for encryption and decryption input, one for
+      output.
+    - base address of the HW crypto device.
+
+  qce_close() is the last request from the client. Normally, it is
+  called from the remove function of the client.
+
+  qce_hw_support() allows the client to query what is supported
+  by the crypto engine hardware.
+
+  qce_ablk_cipher_req() provides ciphering service to the client.
+  qce_process_sha_req() provides hashing service to the client.
+  qce_aead_req() provides aead service to the client.
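+
+  A client's lifecycle, then, looks roughly like the sketch below. The exact
+  prototypes live in qce.h; the argument lists shown here are assumptions for
+  illustration, not the authoritative interface.
+
+	static void *qce_handle;
+
+	static int client_probe(struct platform_device *pdev)
+	{
+		int rc = 0;
+
+		/* assumed signature: returns an opaque engine handle */
+		qce_handle = qce_open(pdev, &rc);
+		if (!qce_handle)
+			return rc;
+
+		/* query HW capabilities before registering algorithms,
+		 * e.g. qce_hw_support(qce_handle, &ce_support); */
+		return 0;
+	}
+
+	static int client_remove(struct platform_device *pdev)
+	{
+		return qce_close(qce_handle);
+	}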
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ in each platform.
+
+
+Dependencies:
+=============
+
+Existing DMA driver.
+The transfers are DMA'ed between the crypto hardware and DDR memory via the
+data mover, ADM. The data transfers are set up to use the existing dma driver.
+
+User space utilities:
+=====================
+  n/a
+
+Known issues:
+=============
+  n/a
+
+To do:
+======
+  n/a
diff --git a/Documentation/crypto/msm/qcedev.txt b/Documentation/crypto/msm/qcedev.txt
new file mode 100644
index 0000000..0638dd9
--- /dev/null
+++ b/Documentation/crypto/msm/qcedev.txt
@@ -0,0 +1,231 @@
+Introduction:
+=============
+
+This driver provides IOCTLs for user space applications to access the crypto
+engine hardware for the qcedev crypto services. The driver supports the
+following crypto algorithms:
+- AES-128, AES-256 (ECB, CBC and CTR mode)
+- AES-192, (ECB, CBC and CTR mode)
+  (support exists on platform supporting CE 3.x hardware)
+- SHA1/SHA256
+- AES-128, AES-256 (XTS), AES CMAC, SHA1/SHA256 HMAC
+  (support exists on platform supporting CE 4.x hardware)
+
+Hardware description:
+=====================
+Crypto 3E provides cipher and hash algorithms as defined in the
+3GPP forum specifications.
+
+
+Software description
+====================
+
+The driver is a Linux platform device driver. For an MSM target,
+there can be multiple crypto devices assigned for QCEDEV.
+
+The driver is a misc device driver as well.
+The following operations are registered in the driver,
+-qcedev_ioctl()
+-qcedev_open()
+-qcedev_release()
+
+The following IOCTLs are available to the user space application(s):
+
+  Cipher IOCTLs:
+  --------------
+    QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+    QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+  Hashing/HMAC IOCTLs
+  -------------------
+
+    QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+    QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+    QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+    QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+	packet of known size.
+    QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+	algorithm) for data packet of known size.
+
+The requests are synchronous. The driver will put the process to
+sleep, waiting for the completion of the requests using wait_for_completion().
+
+Since the requests come from a user space application, before handing
+the requests to the low level qce driver, the ioctl requests and the
+associated input/output buffers have to be sanity checked and copied
+to/from kernel space.
+
+The extra copying of requests/buffers can affect performance. The issue
+with copying the data buffer is resolved by having the client use PMEM
+allocated buffers.
+
+NOTE:  Using memory allocated via PMEM is supported only for in place
+       operations where source and destination buffers point to the same
+       location. Different source and destination buffers are not
+       supported currently.
+       Furthermore, when using PMEM, and in AES CTR mode, when issuing an
+       encryption or decryption request, a non-zero byteoffset is not
+       supported.
+
+The design of the driver is to allow multiple opens, and multiple requests
+to be issued from application(s). Therefore, the driver will internally queue
+the requests, and serialize the requests to the low level qce (or qce40)
+driver.
+
+On an IOCTL request from an application, if there is no outstanding
+request, the driver will issue a "qce" request; otherwise,
+the request is queued in the driver queue. The process is suspended
+waiting for completion.
+
+On completion of a request by the low level qce driver, the internal
+tasklet (done_tasklet) is scheduled. The sole purpose of done_tasklet is
+to call the completion of the current active request (complete()), and
+issue more requests to the qce, if any.
+When the process wakes up from wait_for_completion(), it will collect the
+return code, and return from the ioctl.
+
+A spin lock is used to protect the critical section of the internal queue,
+which is accessed from multiple tasks, SMP, and the completion callback
+from qce.
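+
+The serialization pattern described above is sketched generically below;
+the names are illustrative, not the actual qcedev internals.
+
+	#include <linux/completion.h>
+	#include <linux/interrupt.h>
+	#include <linux/spinlock.h>
+
+	static DEFINE_SPINLOCK(req_lock);
+	static struct completion *active_done;	/* of the active request */
+
+	static void done_tasklet_fn(unsigned long data)
+	{
+		unsigned long flags;
+
+		spin_lock_irqsave(&req_lock, flags);
+		if (active_done)
+			complete(active_done);	/* wake the sleeping ioctl */
+		/* ... dequeue the next queued request, if any,
+		 * and issue it to the qce driver ... */
+		spin_unlock_irqrestore(&req_lock, flags);
+	}
+	static DECLARE_TASKLET(done_tasklet, done_tasklet_fn, 0);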
+
+The driver maintains a set of statistics using debug fs. The files are
+in /debug/qcedev/stats1, /debug/qcedev/stats2, /debug/qcedev/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+
+Power Management
+================
+n/a
+
+
+Interface:
+==========
+
+Linux user space applications need to open a handle
+(file descriptor) to the qcedev device, as follows:
+
+     fd = open("/dev/qce", O_RDWR);
+     ..
+     ioctl(fd, ...);
+
+Once a valid fd is retrieved, the user can call the following ioctls with
+the fd as the first parameter and a pointer to an appropriate data
+structure, qcedev_cipher_op_req or qcedev_sha_op_req (depending on
+cipher/hash functionality), as the second parameter.
+
+The following IOCTLs are available to the user space application(s):
+
+  Cipher IOCTLs:
+  --------------
+    QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+    QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+	The caller of the IOCTL passes a pointer to the structure shown
+	below, as the second parameter.
+
+	struct	qcedev_cipher_op_req {
+		int				use_pmem;
+		union{
+			struct qcedev_pmem_info pmem;
+			struct qcedev_vbuf_info vbuf;
+		};
+		uint32_t			entries;
+		uint32_t			data_len;
+		uint8_t				in_place_op;
+		uint8_t				enckey[QCEDEV_MAX_KEY_SIZE];
+		uint32_t			encklen;
+		uint8_t				iv[QCEDEV_MAX_IV_SIZE];
+		uint32_t			ivlen;
+		uint32_t			byteoffset;
+		enum qcedev_cipher_alg_enum	alg;
+		enum qcedev_cipher_mode_enum	mode;
+		enum qcedev_oper_enum		op;
+	};
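+
+	For illustration, a caller might fill the structure as sketched below
+	for an AES-CBC encryption over a virtual buffer, using the fd obtained
+	above (key, iv, buf and len are the caller's data; declarations
+	omitted). This is a sketch, not authoritative usage: the enum values
+	shown and the vbuf layout are assumptions to be checked against
+	qcedev.h; only the field and IOCTL names above come from this document.
+
+	struct qcedev_cipher_op_req req;
+
+	memset(&req, 0, sizeof(req));
+	req.use_pmem = 0;		/* virtual buffers (vbuf) */
+	req.in_place_op = 1;		/* in place: src == dst */
+	req.entries = 1;
+	req.data_len = len;
+	/* populate req.vbuf.src/dst per qcedev.h (layout not shown here) */
+	memcpy(req.enckey, key, 16);	/* AES-128 key */
+	req.encklen = 16;
+	memcpy(req.iv, iv, 16);
+	req.ivlen = 16;
+	req.byteoffset = 0;
+	req.alg = QCEDEV_ALG_AES;	/* enum names assumed */
+	req.mode = QCEDEV_AES_MODE_CBC;
+	req.op = QCEDEV_OPER_ENC;
+
+	if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req) < 0)
+		/* request failed; inspect errno */;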
+
+  Hashing/HMAC IOCTLs
+  -------------------
+
+    QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+    QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+    QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+    QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+	packet of known size.
+    QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+	algorithm) for data packet of known size.
+
+	The caller of the IOCTL passes a pointer to the structure shown
+	below, as the second parameter.
+
+	struct	qcedev_sha_op_req {
+		struct buf_info			data[QCEDEV_MAX_BUFFERS];
+		uint32_t			entries;
+		uint32_t			data_len;
+		uint8_t				digest[QCEDEV_MAX_SHA_DIGEST];
+		uint32_t			diglen;
+		uint8_t				*authkey;
+		uint32_t			authklen;
+		enum qcedev_sha_alg_enum	alg;
+		struct qcedev_sha_ctxt		ctxt;
+	};
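+
+	The three-step hash sequence can then be driven as sketched below
+	(error checking omitted; fd, buf and len as above). The alg enum name
+	and the data[] buf_info layout are assumptions to be checked against
+	qcedev.h.
+
+	struct qcedev_sha_op_req sreq;
+
+	memset(&sreq, 0, sizeof(sreq));
+	sreq.alg = QCEDEV_ALG_SHA256;	/* enum name assumed */
+	ioctl(fd, QCEDEV_IOCTL_SHA_INIT_REQ, &sreq);
+
+	sreq.data[0].vaddr = buf;	/* buf_info layout assumed */
+	sreq.data[0].len = len;
+	sreq.entries = 1;
+	sreq.data_len = len;
+	ioctl(fd, QCEDEV_IOCTL_SHA_UPDATE_REQ, &sreq);
+
+	ioctl(fd, QCEDEV_IOCTL_SHA_FINAL_REQ, &sreq);
+	/* the digest is now in sreq.digest[0..sreq.diglen-1] */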
+
+The IOCTLs and associated request data structures are defined in qcedev.h
+
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ in each platform.
+
+
+
+Dependencies:
+=============
+qce driver. Please see Documentation/arm/msm/qce.txt.
+
+
+User space utilities:
+=====================
+
+none
+
+Known issues:
+=============
+
+none.
+
+
+To do:
+======
+  Enhance cipher functionality:
+  (1) Add support for handling > 32KB for ciphering functionality when
+      the operation is not an "in place" operation (source != destination),
+      when using PMEM allocated memory.
+
+Limitations:
+============
+  (1) In the case of cipher functionality, the driver does not support
+      a combination of different memory sources for source/destination.
+      In other words, the memory pointed to by src and dst
+      must BOTH (src/dst) be "pmem" or BOTH (src/dst) be "vbuf".
+
+  (2) In the case of hash functionality, the driver does not support data
+      buffers allocated via PMEM.
+
+  (3) Do not load this driver if your device already has kernel space apps
+      that need to access the crypto hardware.
+      Make sure to have qcedev module disabled/unloaded and implement your user
+      space application to use the software implementation (ex: openssl/crypto)
+      of the crypto algorithms.
+      (NOTE:  Please refer to details on the limitations listed in qce.txt)
+
+  (4) If your device has the Playready (Windows Media DRM) application enabled
+      and uses the qcedev module to access the crypto hardware accelerator,
+      please be informed that, for performance reasons, the CE hardware will
+      need to be dedicated to the Playready application. Any other user space
+      application should be implemented to use the software implementation
+      (ex: openssl/crypto) of the crypto algorithms.
diff --git a/Documentation/crypto/msm/qcrypto.txt b/Documentation/crypto/msm/qcrypto.txt
new file mode 100644
index 0000000..2503103
--- /dev/null
+++ b/Documentation/crypto/msm/qcrypto.txt
@@ -0,0 +1,142 @@
+Introduction:
+=============
+
+The QTI Crypto (qcrypto) driver is a Linux crypto driver which interfaces
+with the Linux kernel crypto API layer to provide the HW crypto functions.
+This driver is accessed by kernel space apps via the kernel crypto API layer.
+At present there is no means for user space apps to access this module.
+
+Hardware description:
+=====================
+
+The QTI Crypto HW device family provides a series of algorithms implemented
+in the device.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 provides fast
+AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 4.0 provides
+HMAC-SHA1/SHA256, AES CBC-MAC hashing algorithm and AES XTS/CCM cipher
+algorithms.
+
+
+Software description
+====================
+
+The module init function (_qcrypto_init()) does a platform_register()
+to register the driver. As a result, the driver probe function,
+_qcrypto_probe(), will be invoked for each registered device.
+
+In the probe function, the driver opens the low level CE (qce_open) and
+registers the supported algorithms with the kernel crypto API layer.
+Currently, qcrypto supports the following algorithms:
+
+      ablkcipher -
+          cbc(aes),ecb(aes),ctr(aes)
+      ahash -
+          sha1, sha256
+      aead -
+          authenc(hmac(sha1),cbc(aes))
+
+      The hmac(sha1), hmac(sha256), authenc(hmac(sha1),cbc(aes)), ccm(aes)
+      and xts(aes) algorithms are registered on some platforms that
+      support them in the CE hardware.
+
+The HW device can support various algorithms. However, the most important
+algorithms for gaining performance using a HW crypto accelerator are
+AEAD and ABLKCIPHER.
+
+AEAD stands for "authenticated encryption with associated data".
+ABLKCIPHER stands for "asynchronous block cipher".
+
+The AEAD structure is described in the following header file aead.h
+
+The design of the driver is to allow multiple requests to be
+issued from kernel client SW (e.g. IPSec).
+Therefore, the driver has to internally queue the requests, and
+serialize the requests to the low level qce driver.
+
+When a request is received from the client, if there is no outstanding
+request, a qce (or qce40) request is issued; otherwise, the request is
+queued in the driver queue.
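+
+A kernel client reaches qcrypto through the standard kernel crypto API. A
+minimal sketch using the 4.9-era ablkcipher interface follows (stock kernel
+calls; the function itself and its error handling are illustrative, and a
+real client must wait for the async completion before freeing the request):
+
+	#include <linux/crypto.h>
+	#include <linux/scatterlist.h>
+
+	static int cbc_aes_encrypt(struct scatterlist *src,
+				   struct scatterlist *dst,
+				   unsigned int nbytes, u8 *key, u8 *iv)
+	{
+		struct crypto_ablkcipher *tfm;
+		struct ablkcipher_request *req;
+		int rc;
+
+		tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+		if (IS_ERR(tfm))
+			return PTR_ERR(tfm);
+
+		crypto_ablkcipher_setkey(tfm, key, 16);	/* AES-128 */
+		req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+		/* a real client registers a completion callback here */
+		ablkcipher_request_set_callback(req, 0, NULL, NULL);
+		ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
+
+		/* may return -EINPROGRESS for async engines like qcrypto */
+		rc = crypto_ablkcipher_encrypt(req);
+
+		ablkcipher_request_free(req);
+		crypto_free_ablkcipher(tfm);
+		return rc;
+	}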
+
+On completion of a request, the qce (or qce40) invokes the callback
+registered by qcrypto. The internal tasklet (done_tasklet) is scheduled
+in this callback function. The sole purpose of done_tasklet is
+to call the completion of the current active request, and
+issue more requests to the qce (or qce40), if any exist.
+
+A spin lock is used to protect the critical section of the internal queue,
+which is accessed from multiple tasks, SMP, and the completion callback
+from qce.
+
+The driver maintains a set of statistics using debug fs. The files are
+in /debug/qcrypto/stats1, /debug/qcrypto/stats2, /debug/qcrypto/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+Test vectors for the authenc(hmac(sha1),cbc(aes)) algorithm were developed
+offline and imported into crypto/testmgr.c and crypto/testmgr.h.
+
+
+Power Management
+================
+  none
+
+
+Interface:
+==========
+The kernel interface is defined in crypto.h.
+
+
+Module parameters:
+==================
+
+All the platform specific parameters are defined in the board init
+file, e.g. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+Dependencies:
+=============
+qce driver.
+
+
+User space utilities:
+=====================
+  n/a
+
+Known issues:
+=============
+  n/a
+
+To do:
+======
+  Add Hashing algorithms.
+
+
+Limitations:
+===============
+(1) Each packet transfer size (for cipher and hash) is limited to a maximum of
+    32KB. This is a limitation in the crypto engine hardware. Clients will
+    have to break packets larger than 32KB into multiple requests of smaller
+    size data packets.
+
+(2) Do not load this driver if your device has user space apps that need to
+    access the crypto hardware. Please make sure to have the qcrypto module
+    disabled/unloaded.
+    Not having the driver loaded will result in the kernel space apps using
+    the registered software implementation of the crypto algorithms.
+
+(3) If your device has the Playready application enabled and uses the qcedev
+    module to access the crypto hardware accelerator, please be informed that,
+    for performance reasons, the CE hardware will need to be dedicated to the
+    Playready application. Any other user space or kernel application should be
+    implemented to use the software implementation of the crypto algorithms.
+
+    (NOTE:  Please refer to details on the limitations listed in qce/40.txt)
diff --git a/Documentation/devicetree/bindings/arm/msm/tz-log.txt b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
new file mode 100644
index 0000000..d7e84a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
@@ -0,0 +1,24 @@
+* TZLOG (Trust Zone Log)
+
+The tz_log driver is a platform device driver that exposes a debugfs
+interface for accessing and displaying diagnostic information
+related to secure code (Trustzone/QSEE).
+
+Required properties:
+- compatible : Should be "qcom,tz-log"
+- reg        : Offset and size of the register set for the device
+
+Optional properties:
+- qcom,hyplog-enabled   : (boolean) indicates if driver supports HYP logger service.
+- hyplog-address-offset : Register offset to get the HYP log base address.
+- hyplog-size-offset    : Register offset to get the HYP log size parameter.
+
+Example:
+
+	qcom,tz-log@fe805720 {
+		compatible = "qcom,tz-log";
+		reg = <0xfe805720 0x1000>;
+		qcom,hyplog-enabled;
+		hyplog-address-offset = <0x410>;
+		hyplog-size-offset = <0x414>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/msm/ice.txt b/Documentation/devicetree/bindings/crypto/msm/ice.txt
new file mode 100644
index 0000000..2d0e580
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/ice.txt
@@ -0,0 +1,32 @@
+* Inline Crypto Engine (ICE)
+
+Required properties:
+  - compatible : should be "qcom,ice"
+  - reg : <register mapping>
+
+Optional properties:
+  - interrupt-names     : name describing the interrupts for ICE IRQ
+  - interrupts          : <interrupt mapping for ICE IRQ>
+  - qcom,enable-ice-clk : should enable clocks for ICE HW
+  - clocks              : List of phandle and clock specifier pairs
+  - clock-names         : List of clock input name strings sorted in the same
+                          order as the clocks property.
+  - qcom,op-freq-hz     : max clock speed sorted in the same order as the clocks
+                          property.
+  - qcom,instance-type  : describes the storage type for which the ICE node is
+			  defined; currently, only "ufs" and "sdcc" are supported
+			  storage types
+
+Example:
+        ufs_ice: ufsice@630000 {
+                compatible = "qcom,ice";
+                reg = <0x630000 0x8000>;
+                interrupt-names = "ufs_ice_nonsec_level_irq", "ufs_ice_sec_level_irq";
+                interrupts = <0 258 0>, <0 257 0>;
+                qcom,enable-ice-clk;
+                clock-names = "ice_core_clk_src", "ice_core_clk";
+                clocks = <&clock_gcc clk_ufs_ice_core_clk_src>,
+                         <&clock_gcc clk_gcc_ufs_ice_core_clk>;
+                qcom,op-freq-hz = <300000000>, <0>;
+		qcom,instance-type = "ufs";
+                status = "disabled";
+        };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
new file mode 100644
index 0000000..c8077cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -0,0 +1,43 @@
+* QCEDEV (QTI Crypto Engine Device)
+
+Required properties:
+  - compatible : should be "qcom,qcedev"
+  - reg : should contain crypto, BAM register map.
+  - reg-names : should contain the crypto and bam base register names.
+  - interrupts : should contain crypto BAM interrupt.
+  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm-bus,name: Should be "qcedev-noc"
+  - qcom,msm-bus,num-cases: Depends on the use cases for bus scaling
+  - qcom,msm-bus,active-only: Boolean flag for context of request (active/dual)
+  - qcom,msm-bus,num-paths: The paths for source and destination ports
+  - qcom,msm-bus,vectors-KBps: Vectors for bus topology.
+  - qcom,ce-device: Device number.
+  - qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+
+Optional properties:
+  - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EE.
+  - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+  - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+	qcom,qcedev@fd440000 {
+		compatible = "qcom,qcedev";
+		reg = <0xfd440000 0x20000>,
+			<0xfd444000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 235 0>;
+		qcom,bam-pipe-pair = <0>;
+		qcom,ce-hw-instance = <1>;
+		qcom,ce-device = <0>;
+		qcom,ce-hw-shared;
+		qcom,msm-bus,name = "qcedev-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
+		qcom,ce-opp-freq = <100000000>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcota.txt b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
new file mode 100644
index 0000000..3ce63af
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
@@ -0,0 +1,42 @@
+* QCOTA (Over The Air Crypto Device)
+
+Required properties:
+  - compatible : should be "qcom,qcota"
+  - reg : should contain crypto, BAM register map.
+  - reg-names : should contain the crypto and bam base register names.
+  - interrupts : should contain crypto BAM interrupt.
+  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,ce-device: Unique QCOTA device identifier. 0 for first
+			instance, 1 for second instance, n-1 for n-th instance.
+  - qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+
+Optional properties:
+  - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+	qcom,qcota@fe140000 {
+		compatible = "qcom,qcota";
+		reg = <0xfe140000 0x20000>,
+			<0xfe144000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 111 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <2>;
+		qcom,ce-device = <0>;
+		qcom,ce-opp-freq = <100000000>;
+	};
+
+	qcom,qcota@fe0c0000 {
+		compatible = "qcom,qcota";
+		reg = <0xfe0c0000 0x20000>,
+			<0xfe0c4000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 113 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <4>;
+		qcom,ce-device = <1>;
+		qcom,ce-opp-freq = <100000000>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
new file mode 100644
index 0000000..06b219a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -0,0 +1,61 @@
+* QCRYPTO (QTI Crypto)
+
+Required properties:
+  - compatible : should be "qcom,qcrypto"
+  - reg : should contain crypto, BAM register map.
+  - reg-names : should contain the crypto and bam base register names.
+  - interrupts : should contain crypto BAM interrupt.
+  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm-bus,name: Should be "qcrypto-noc"
+  - qcom,msm-bus,num-cases: Depends on the use cases for bus scaling
+  - qcom,msm-bus,active-only: Boolean flag for context of request (active/dual)
+  - qcom,msm-bus,num-paths: The paths for source and destination ports
+  - qcom,ce-device: Device number. Device number is encoded with the following:
+		bit 3-0   device type:	0 for full disk encryption (fde)
+					1 for per file encryption (pfe)
+		bit 7-4   unit number within the device type.
+
+
+Optional properties:
+  - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EE.
+  - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+  - qcom,use-sw-aes-cbc-ecb-ctr-algo : optional, indicates whether to use the SW aes-cbc/ecb/ctr algorithm.
+  - qcom,use-sw-aes-xts-algo : optional, indicates whether to use the SW aes-xts algorithm.
+  - qcom,use-sw-aead-algo : optional, indicates whether to use the SW aead algorithm.
+  - qcom,use-sw-ahash-algo : optional, indicates whether to use the SW hash algorithm.
+  - qcom,use-sw-hmac-algo : optional, indicates whether to use the SW hmac algorithm.
+  - qcom,use-sw-aes-ccm-algo : optional, indicates whether to use the SW aes-ccm algorithm.
+  - qcom,clk-mgmt-sus-res : optional, indicates if the ce clocks need to be disabled/enabled in the suspend/resume functions.
+  - qcom,support-core-clk-only : optional, indicates if the HW supports a single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value; changes from target to target. Default value is 1 if not specified.
+
+  - qcom,ce-opp-freq: optional, indicates the CE operating frequency in Hz,
+	changes from target to target. If not specified, by default the
+	frequency is set to 100MHz.
+
+  - qcom,msm-bus,vectors-KBps: optional, indicates vectors for bus topology.
+	This attribute is required for msm targets where bus scaling is
+	required. Other targets, such as fsm, do not perform bus scaling and
+	do not require it.
+
+Example:
+
+        qcom,qcrypto@fd444000 {
+		compatible = "qcom,qcrypto";
+		reg = <0xfd440000 0x20000>,
+		      <0xfd444000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 235 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <1>;
+		qcom,ce-device = <0>;
+		qcom,ce-hw-shared;
+		qcom,msm-bus,name = "qcrypto-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
+		qcom,ce-opp-freq = <100000000>;
+	};
diff --git a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
index af0b903..dfc14f7 100644
--- a/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
+++ b/Documentation/devicetree/bindings/extcon/extcon-usb-gpio.txt
@@ -5,7 +5,10 @@
 
 Required properties:
 - compatible: Should be "linux,extcon-usb-gpio"
+
+Either one of id-gpio or vbus-gpio must be present. Both can be present as well.
 - id-gpio: gpio for USB ID pin. See gpio binding.
+- vbus-gpio: gpio for USB VBUS pin.
 
 Example: Examples of extcon-usb-gpio node in dra7-evm.dts as listed below:
 	extcon_usb1 {
diff --git a/Documentation/devicetree/bindings/fb/mdss-pll.txt b/Documentation/devicetree/bindings/fb/mdss-pll.txt
index b028dda..d0d7fff 100644
--- a/Documentation/devicetree/bindings/fb/mdss-pll.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-pll.txt
@@ -15,7 +15,7 @@
                         "qcom,mdss_hdmi_pll_8996_v2", "qcom,mdss_dsi_pll_8996_v2",
                         "qcom,mdss_hdmi_pll_8996_v3", "qcom,mdss_hdmi_pll_8996_v3_1p8",
                         "qcom,mdss_edp_pll_8996_v3", "qcom,mdss_edp_pll_8996_v3_1p8",
-                        "qcom,mdss_dsi_pll_8998", "qcom,mdss_dp_pll_8998",
+                        "qcom,mdss_dsi_pll_10nm", "qcom,mdss_dp_pll_8998",
                         "qcom,mdss_hdmi_pll_8998"
 - cell-index:		Specifies the controller used
 - reg:			offset and length of the register set for the device.
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
new file mode 100644
index 0000000..8e2bdee
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-cci.txt
@@ -0,0 +1,363 @@
+* Qualcomm Technologies, Inc. MSM CCI
+
+[First level nodes]
+Required properties:
+- cell-index: cci hardware core index
+- compatible :
+  - "qcom,cci"
+- reg : offset and length of the register set for the device
+  for the cci operating in compatible mode.
+- reg-names : should specify relevant names to each reg property defined.
+- interrupts : should contain the cci interrupt.
+- interrupt-names : should specify relevant names to each interrupts
+  property defined.
+- gpios : should contain phandle to gpio controller node and array of
+  #gpio-cells specifying specific gpio (controller specific)
+- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+  qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-req-tbl-label : should contain name of gpios present in
+  qcom,gpio-req-tbl-num property (in the same order)
+- clock-names: name of the clocks required for the device
+- clock-rates: clock rate in Hz
+
+Optional properties:
+- qcom,cam-vreg-name : name of the voltage regulators required for the device.
+- gdscr-supply : should contain gdsr regulator used for cci clocks.
+- mmagic-supply : should contain mmagic regulator used for mmagic clocks.
+
+- I2C speed settings (*)
+  - i2c_freq_100Khz: qcom,i2c_standard_mode - node should contain clock settings
+    for 100KHz
+  - i2c_freq_400Khz: qcom,i2c_fast_mode - node should contain clock settings
+    for 400KHz
+  - i2c_freq_custom: qcom,i2c_custom_mode - node can contain clock settings for
+    frequencies other than 100KHz and 400KHz, specific to a use case.
+    Currently it has settings for 375KHz.
+  - i2c_freq_1Mhz: qcom,i2c_fast_plus_mode - node should contain clock
+    settings for 1MHz
+* if a speed setting is not defined, the low level driver can use
+"i2c_freq_custom" as the default
+
+[Second level nodes]
+* Qualcomm Technologies, Inc. CCI clock settings
+
+Optional properties:
+- qcom,hw-thigh : should contain the high period of the SCL clock in terms of
+    CCI clock cycles
+- qcom,hw-tlow : should contain the low period of the SCL clock in terms of
+    CCI clock cycles
+- qcom,hw-tsu-sto : should contain setup time for STOP condition
+- qcom,hw-tsu-sta : should contain setup time for Repeated START condition
+- qcom,hw-thd-dat : should contain hold time for the data
+- qcom,hw-thd-sta : should contain hold time for START condition
+- qcom,hw-tbuf : should contain free time between a STOP and a START condition
+- qcom,hw-scl-stretch-en : should contain enable or disable clock stretching
+- qcom,hw-trdhld : should contain internal hold time for SDA
+- qcom,hw-tsp : should contain filtering of glitches
+
+* Qualcomm Technologies, Inc. MSM Sensor
+
+MSM sensor node contains properties of camera sensor
+
+Required properties:
+- compatible : should be manufacturer name followed by sensor name
+  - "qcom,camera"
+- reg : should contain i2c slave address of the device
+- qcom,csiphy-sd-index : should contain the csiphy instance that will be used
+  to receive sensor data
+  - 0, 1, 2
+- cam_vdig-supply : should contain regulator from which digital voltage is
+  supplied
+- cam_vana-supply : should contain regulator from which analog voltage is
+  supplied
+- cam_vio-supply : should contain regulator from which IO voltage is supplied
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+  sensor
+  - "cam_vdig", "cam_vana", "cam_vio", "cam_vaf"
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level for
+  regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level for
+  regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain optimum voltage level for regulators
+  mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,sensor-position-roll : should contain sensor rotational angle with respect
+  to axis of reference
+  - 0, 90, 180, 360
+- qcom,sensor-position-pitch : should contain sensor rotational angle with respect
+  to axis of reference
+  - 0, 90, 180, 360
+- qcom,sensor-position-yaw : should contain sensor rotational angle  with respect
+  to axis of reference
+  - 0, 90, 180, 360
+Optional properties:
+- qcom,slave-id : should contain i2c slave address, device id address, expected
+  id read value and device id mask
+- qcom,sensor-name : should contain a unique sensor name to differentiate from
+  other sensors
+  - "s5k3l1yx"
+- qcom,sensor-mode : should contain sensor mode supported
+  - 0 -> back camera 2D
+  - 1 -> front camera 2D
+  - 2 -> back camera 3D
+  - 3 -> back camera int 3D
+- qcom,sensor-type : should contain format of data that sensor streams
+  - 0 -> bayer format
+  - 1 -> yuv format
+- qcom,secure : should be enabled to operate the camera in secure mode
+  - 0, 1
+- qcom,gpio-no-mux : should contain field to indicate whether gpio mux table is
+  available
+  - 1 if gpio mux is not available, 0 otherwise
+- cam_vaf-supply : should contain regulator from which AF voltage is supplied
+- gpios : should contain phandle to gpio controller node and array of
+   #gpio-cells specifying specific gpio (controller specific)
+- qcom,gpio-reset : should contain index to gpio used by the sensor's reset_n
+- qcom,gpio-standby : should contain index to gpio used by the sensor's standby_n
+- qcom,gpio-vio : should contain index to gpio used by the sensor's io vreg enable
+- qcom,gpio-vana : should contain index to gpio used by the sensor's analog vreg enable
+- qcom,gpio-vdig : should contain index to gpio used by the sensor's digital vreg enable
+- qcom,gpio-vaf : should contain index to gpio used by the sensor's af vreg enable
+- qcom,gpio-af-pwdm : should contain index to gpio used by the sensor's af pwdm_n
+- qcom,gpio-req-tbl-num : should contain index to gpios specific to this sensor
+- qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+  qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-req-tbl-label : should contain name of gpios present in
+  qcom,gpio-req-tbl-num property (in the same order)
+- qcom,gpio-set-tbl-num : should contain index of gpios that need to be
+  configured by msm
+- qcom,gpio-set-tbl-flags : should contain value to be configured for the gpios
+  present in qcom,gpio-set-tbl-num property (in the same order)
+- qcom,gpio-set-tbl-delay : should contain amount of delay after configuring
+  gpios as specified in gpio_set_tbl_flags property (in the same order)
+- qcom,csi-phy-sel : should contain CSIPHY core instance from which CSID should
+  receive data
+- qcom,actuator-cam-name : should contain actuator cam name associated with
+  this sensor
+  - If the actuator does not exist, this property should not be initialized
+  - If the actuator exists, this field should indicate the index of the
+    actuator to be used
+- qcom,actuator-vcm-pwd : should contain the gpio pin of vcm power to be enabled
+  for actuator
+- qcom,actuator-vcm-enable : should contain value to be set for actuator vcm
+  gpio
+- qcom,sensor-position : should contain the mount angle of the camera sensor
+  - 0 -> back camera
+  - 1 -> front camera
+- qcom,cci-master : should contain i2c master id to be used for this camera
+  sensor
+  - 0 -> MASTER 0
+  - 1 -> MASTER 1
+- qcom,actuator-src : if auto focus is supported by this sensor, this
+  property should contain phandle of respective actuator node
+- qcom,led-flash-src : if LED flash is supported by this sensor, this
+  property should contain phandle of respective LED flash node
+- qcom,vdd-cx-supply : should contain regulator from which cx voltage is
+  supplied
+- qcom,vdd-cx-name : should contain names of cx regulator
+- qcom,eeprom-src : if eeprom memory is supported by this sensor, this
+  property should contain phandle of respective eeprom nodes
+- qcom,ois-src : if optical image stabilization is supported by this sensor,
+  this property should contain phandle of respective ois node
+- qcom,ir-led-src : if ir led is supported by this sensor, this property
+  should contain phandle of respective ir-led node
+- qcom,ir-cut-src : if ir cut is supported by this sensor, this property
+  should contain phandle of respective ir-cut node
+- qcom,special-support-sensors: if only some special sensors are supported
+  on this board, add the sensor names to this property.
+
+* Qualcomm Technologies, Inc. MSM ACTUATOR
+
+Required properties:
+- cell-index : should contain unique identifier to differentiate
+  between multiple actuators
+- reg : should contain i2c slave address of the actuator and length of
+  data field which is 0x0
+- compatible :
+  - "qcom,actuator"
+- qcom,cci-master : should contain i2c master id to be used for this camera
+  sensor
+  - 0 -> MASTER 0
+  - 1 -> MASTER 1
+Optional properties:
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+  actuator
+  - "cam_vaf"
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts
+  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts
+  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
+  required from the regulators mentioned in the qcom,cam-vreg-name property
+  (in the same order).
+- cam_vaf-supply : should contain regulator from which AF voltage is supplied
+
+* Qualcomm Technologies, Inc. MSM OIS
+
+Required properties:
+- cell-index : should contain unique identifier to differentiate
+  between multiple ois drivers
+- reg : should contain i2c slave address of the ois and length of
+  data field which is 0x0
+- compatible :
+  - "qcom,ois"
+- qcom,cci-master : should contain i2c master id to be used for this camera
+  sensor
+  - 0 -> MASTER 0
+  - 1 -> MASTER 1
+
+Optional properties:
+- qcom,cam-vreg-name : should contain names of all regulators needed by this
+  ois
+  - "cam_vaf"
+- qcom,cam-vreg-min-voltage : should contain minimum voltage level in microvolts
+  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-max-voltage : should contain maximum voltage level in microvolts
+  for regulators mentioned in qcom,cam-vreg-name property (in the same order)
+- qcom,cam-vreg-op-mode : should contain the maximum current in microamps
+  required from the regulators mentioned in the qcom,cam-vreg-name property
+  (in the same order).
+- cam_vaf-supply : should contain regulator from which ois voltage is supplied
+
+Example:
+
+led_flash0: qcom,camera-flash@0 {
+    cell-index = <0>;
+    compatible = "qcom,camera-flash";
+    qcom,flash-source = <&pmi8994_flash0 &pmi8994_flash1>;
+    qcom,torch-source = <&pmi8998_torch0 &pmi8998_torch1>;
+    qcom,switch-source = <&pmi8998_switch>;
+    status = "ok";
+};
+
+qcom,cci@0xfda0c000 {
+    cell-index = <0>;
+    compatible = "qcom,cci";
+    reg = <0xfda0c000 0x300>;
+    reg-names = "cci";
+    interrupts = <0 50 0>;
+    interrupt-names = "cci";
+    clock-names = "camnoc_axi_clk", "soc_ahb_clk",
+         "slow_ahb_src_clk", "cpas_ahb_clk",
+         "cci_clk", "cci_clk_src";
+    qcom,clock-rates = <0 0 80000000 0 0 37500000>;
+    gpios = <&tlmm 17 0>,
+           <&tlmm 18 0>,
+           <&tlmm 19 0>,
+           <&tlmm 20 0>;
+    qcom,gpio-tbl-num = <0 1 2 3>;
+    qcom,gpio-tbl-flags = <1 1 1 1>;
+    qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+                    "CCI_I2C_CLK0",
+                    "CCI_I2C_DATA1",
+                    "CCI_I2C_CLK1";
+    i2c_freq_100Khz: qcom,i2c_standard_mode {
+         qcom,hw-thigh = <78>;
+         qcom,hw-tlow = <114>;
+         qcom,hw-tsu-sto = <28>;
+         qcom,hw-tsu-sta = <28>;
+         qcom,hw-thd-dat = <10>;
+         qcom,hw-thd-sta = <77>;
+         qcom,hw-tbuf = <118>;
+         qcom,hw-scl-stretch-en = <0>;
+         qcom,hw-trdhld = <6>;
+         qcom,hw-tsp = <1>;
+         status = "ok";
+    };
+    i2c_freq_400Khz: qcom,i2c_fast_mode {
+         qcom,hw-thigh = <20>;
+         qcom,hw-tlow = <28>;
+         qcom,hw-tsu-sto = <21>;
+         qcom,hw-tsu-sta = <21>;
+         qcom,hw-thd-dat = <13>;
+         qcom,hw-thd-sta = <18>;
+         qcom,hw-tbuf = <25>;
+         qcom,hw-scl-stretch-en = <0>;
+         qcom,hw-trdhld = <6>;
+         qcom,hw-tsp = <3>;
+         status = "ok";
+    };
+    i2c_freq_custom: qcom,i2c_custom_mode {
+         qcom,hw-thigh = <15>;
+         qcom,hw-tlow = <28>;
+         qcom,hw-tsu-sto = <21>;
+         qcom,hw-tsu-sta = <21>;
+         qcom,hw-thd-dat = <13>;
+         qcom,hw-thd-sta = <18>;
+         qcom,hw-tbuf = <25>;
+         qcom,hw-scl-stretch-en = <1>;
+         qcom,hw-trdhld = <6>;
+         qcom,hw-tsp = <3>;
+         status = "ok";
+    };
+    i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+         qcom,hw-thigh = <16>;
+         qcom,hw-tlow = <22>;
+         qcom,hw-tsu-sto = <17>;
+         qcom,hw-tsu-sta = <18>;
+         qcom,hw-thd-dat = <16>;
+         qcom,hw-thd-sta = <15>;
+         qcom,hw-tbuf = <19>;
+         qcom,hw-scl-stretch-en = <1>;
+         qcom,hw-trdhld = <3>;
+         qcom,hw-tsp = <3>;
+         qcom,cci-clk-src = <37500000>;
+         status = "ok";
+    };
+
+    actuator0: qcom,actuator@0 {
+         cell-index = <0>;
+         reg = <0x0>;
+         compatible = "qcom,actuator";
+         qcom,cci-master = <0>;
+         cam_vaf-supply = <&pmi8998_bob>;
+         qcom,cam-vreg-name = "cam_vaf";
+         qcom,cam-vreg-min-voltage = <2800000>;
+         qcom,cam-vreg-max-voltage = <2800000>;
+         qcom,cam-vreg-op-mode = <100000>;
+    };
+
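+    /*
+     * Hypothetical ois node for illustration; the supply, voltages and
+     * current below are placeholders, not taken from a real board file.
+     */
+    ois0: qcom,ois@0 {
+         cell-index = <0>;
+         reg = <0x0>;
+         compatible = "qcom,ois";
+         qcom,cci-master = <0>;
+         cam_vaf-supply = <&pmi8998_bob>;
+         qcom,cam-vreg-name = "cam_vaf";
+         qcom,cam-vreg-min-voltage = <2800000>;
+         qcom,cam-vreg-max-voltage = <2800000>;
+         qcom,cam-vreg-op-mode = <100000>;
+    };
+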
+    qcom,cam-sensor@0 {
+         cell-index = <0>;
+         compatible = "qcom,camera";
+         reg = <0x0>;
+         qcom,csiphy-sd-index = <0>;
+         qcom,sensor-position-roll = <90>;
+         qcom,sensor-position-pitch = <0>;
+         qcom,sensor-position-yaw = <180>;
+         qcom,secure = <1>;
+         qcom,led-flash-src = <&led_flash0>;
+         qcom,actuator-src = <&actuator0>;
+         qcom,eeprom-src = <&eeprom0>;
+         cam_vdig-supply = <&pm845_s3>;
+         cam_vio-supply = <&pm845_lvs1>;
+         cam_vana-supply = <&pmi8998_bob>;
+         qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+         qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+         qcom,cam-vreg-max-voltage = <0 3312000 1352000>;
+         qcom,cam-vreg-op-mode = <0 80000 105000>;
+         qcom,gpio-no-mux = <0>;
+         pinctrl-names = "cam_default", "cam_suspend";
+         pinctrl-0 = <&cam_sensor_mclk0_active
+                   &cam_sensor_rear_active>;
+         pinctrl-1 = <&cam_sensor_mclk0_suspend
+                   &cam_sensor_rear_suspend>;
+         gpios = <&tlmm 13 0>,
+              <&tlmm 80 0>,
+              <&tlmm 79 0>;
+         qcom,gpio-reset = <1>;
+         qcom,gpio-standby = <2>;
+         qcom,gpio-req-tbl-num = <0 1 2>;
+         qcom,gpio-req-tbl-flags = <1 0 0>;
+         qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+                         "CAM_RESET0",
+                         "CAM_VANA";
+         qcom,sensor-position = <0>;
+         qcom,sensor-mode = <0>;
+         qcom,cci-master = <0>;
+         status = "ok";
+         clocks = <&clock_mmss clk_mclk0_clk_src>,
+               <&clock_mmss clk_camss_mclk0_clk>;
+         clock-names = "cam_src_clk", "cam_clk";
+    };
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
new file mode 100644
index 0000000..e8a74b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/msm-cam-csiphy.txt
@@ -0,0 +1,34 @@
+* Qualcomm Technologies, Inc. MSM CSI Phy
+
+Required properties:
+- cell-index: csi phy hardware core index
+- compatible :
+  - "qcom,csiphy-v5.01"
+- reg : offset and length of the register set for the csiphy device
+  operating in compatible mode.
+- reg-names : should specify a relevant name for each reg property defined.
+- interrupts : should contain the csiphy interrupt.
+- interrupt-names : should specify a relevant name for each interrupts
+  property defined.
+- clock-names: name of the clocks required for the device
+- qcom,clock-rates: clock rate in Hz
+  - 0 if the clock is required but no specific rate needs to be applied
+
+Example:
+
+qcom,csiphy@ac65000 {
+     cell-index = <0>;
+     compatible = "qcom,csiphy-v1.0", "qcom,csiphy";
+     reg = <0xac65000 0x200>;
+     reg-names = "csiphy";
+     interrupts = <0 477 0>;
+     interrupt-names = "csiphy";
+     clock-names = "camnoc_axi_clk", "soc_ahb_clk",
+              "slow_ahb_src_clk", "cpas_ahb_clk",
+              "cphy_rx_clk_src", "csiphy0_clk",
+              "csi0phytimer_clk_src", "csi0phytimer_clk",
+              "ife_0_csid_clk", "ife_0_csid_clk_src";
+     qcom,clock-rates =
+           <0 0 80000000 0 320000000 0 269333333 0 0 384000000>;
+     status = "ok";
+};
diff --git a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
index 058dab1..0295e1b 100644
--- a/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-sde-rotator.txt
@@ -84,6 +84,7 @@
 - qcom,mdss-rot-mode:		This is integer value indicates operation mode
 				of the rotator device
 - qcom,mdss-sbuf-headroom:	This integer value indicates stream buffer headroom in lines.
+- qcom,mdss-rot-linewidth:	This integer value indicates the rotator line width supported, in pixels.
 - cache-slice-names:		A set of names that identify the usecase names of a client that uses
 				cache slice. These strings are used to look up the cache slice
 				entries by name.
diff --git a/Documentation/devicetree/bindings/pci/msm_pcie.txt b/Documentation/devicetree/bindings/pci/msm_pcie.txt
index a50e0c2..fc019bd 100644
--- a/Documentation/devicetree/bindings/pci/msm_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_pcie.txt
@@ -79,8 +79,12 @@
     PCIe port PHY.
     Should be specified in groups (offset, value, delay).
   - qcom,use-19p2mhz-aux-clk: The frequency of PCIe AUX clock is 19.2MHz.
-  - qcom,ep-wakeirq: The endpoint will issue wake signal when it is up, and the
-    root complex has the capability to enumerate the endpoint for this case.
+  - qcom,boot-option: Bits that alter the PCIe bus driver boot sequence.
+    The following details what happens when each bit is set:
+	BIT(0): PCIe bus driver will not start enumeration during its probe.
+		Clients will control when PCIe bus driver should do enumeration.
+	BIT(1): PCIe bus driver will not start enumeration if it receives a WAKE
+		interrupt.
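+    For example, to have clients control enumeration and also ignore the
+    WAKE interrupt, both bits can be set (BIT(0) | BIT(1) = 0x3):
+	qcom,boot-option = <0x3>;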
   - qcom,msi-gicm-addr: MSI address for GICv2m.
   - qcom,msi-gicm-base: MSI IRQ base for GICv2m.
   - qcom,ext-ref-clk: The reference clock is external.
@@ -263,7 +267,7 @@
 		qcom,aux-clk-sync;
 		qcom,n-fts = <0x50>;
 		qcom,pcie-phy-ver = <1>;
-		qcom,ep-wakeirq;
+		qcom,boot-option = <0x1>;
 		qcom,msi-gicm-addr = <0xf9040040>;
 		qcom,msi-gicm-base = <0x160>;
 		qcom,ext-ref-clk;
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
index c7024e0..d8934c0 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa.txt
@@ -10,9 +10,13 @@
 - qcom,ipa-loaduC: indicate that ipa uC should be loaded
 - qcom,ipa-advertise-sg-support: determine how to respond to a query
 regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag indicating whether to enable
+                        the NAPI framework
+- qcom,wan-rx-desc-size: size of the WAN RX descriptor FIFO ring;
+                         the default is 256
 
 Example:
 	qcom,rmnet-ipa {
 		compatible = "qcom,rmnet-ipa";
+		qcom,wan-rx-desc-size = <256>;
 	}
 
diff --git a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
index 3f55312..e9575f1 100644
--- a/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
+++ b/Documentation/devicetree/bindings/platform/msm/rmnet_ipa3.txt
@@ -10,9 +10,13 @@
 - qcom,ipa-loaduC: indicate that ipa uC should be loaded
 - qcom,ipa-advertise-sg-support: determine how to respond to a query
 regarding scatter-gather capability
+- qcom,ipa-napi-enable: Boolean flag indicating whether to enable
+                        the NAPI framework
+- qcom,wan-rx-desc-size: size of the WAN RX descriptor FIFO ring;
+                         the default is 256
 
 Example:
 	qcom,rmnet-ipa3 {
 		compatible = "qcom,rmnet-ipa3";
+		qcom,wan-rx-desc-size = <256>;
 	}
 
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 9638888..addb0a6 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -154,6 +154,20 @@
 		    asleep and the battery is discharging. This option requires
 		    qcom,fg-esr-timer-awake to be defined.
 
+- qcom,fg-esr-pulse-thresh-ma
+	Usage:      optional
+	Value type: <u32>
+	Definition: ESR pulse qualification threshold in mA. If this is not
+		    specified, a default value of 110 mA will be configured.
+		    Allowed values are from 1 to 997.
+
+- qcom,fg-esr-meas-curr-ma
+	Usage:      optional
+	Value type: <u32>
+	Definition: ESR measurement current in mA. If this is not specified,
+		    a default value of 120 mA will be configured. Allowed
+		    values are 60, 120, 180 and 240.
+
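+	For example, a board selecting a 150 mA pulse qualification threshold
+	and a 180 mA measurement current (illustrative values within the
+	allowed ranges documented above) would contain:
+
+		qcom,fg-esr-pulse-thresh-ma = <150>;
+		qcom,fg-esr-meas-curr-ma = <180>;
+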
 - qcom,cycle-counter-en
 	Usage:      optional
 	Value type: <empty>
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
index c8f2a5a..92ef23c 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -31,6 +31,12 @@
 	revid module. This is used to identify
 	the SMB subtype.
 
+- qcom,parallel-mode
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies parallel charging mode. If not specified, MID-MID
+	      option is selected by default.
+
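+  For example (the numeric encoding of each mode is defined by the charger
+  driver; the value shown is illustrative only):
+
+	qcom,parallel-mode = <1>;
+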
 - qcom,suspend-input
   Usage:      optional
   Value type: <empty>
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
new file mode 100644
index 0000000..8fbf8e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -0,0 +1,85 @@
+* QSEECOM (QTI Secure Execution Environment Communicator)
+
+Required properties:
+- compatible : Should be "qcom,qseecom"
+- reg : should contain the memory region address reserved for loading secure apps.
+- qcom,disk-encrypt-pipe-pair : indicates which CE HW pipe pair is used for disk encryption
+- qcom,file-encrypt-pipe-pair : indicates which CE HW pipe pair is used for file encryption
+- qcom,support-multiple-ce-hw-instance : indicates whether multiple CE HW instances are supported.
+- qcom,hlos-num-ce-hw-instances : indicates the number of CE HW instances HLOS can use.
+- qcom,hlos-ce-hw-instance : indicates which CE HW is used by the HLOS crypto driver
+- qcom,qsee-ce-hw-instance : indicates which CE HW is used by the secure domain (TZ) crypto driver
+- qcom,msm_bus,name: Should be "qseecom-noc"
+- qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
+- qcom,msm_bus,num_paths: The paths for source and destination ports
+- qcom,msm_bus,vectors: Vectors for bus topology.
+- qcom,ce-opp-freq: indicates the CE operating frequency in Hz; it changes from target to target.
+- qcom,full-disk-encrypt-info : Vectors defining the full disk encryption unit, crypto engine, and pipe pair configuration as <unit#, ce#, pipe-pair#>
+- qcom,per-file-encrypt-info : Vectors defining the per file encryption unit, crypto engine, and pipe pair configuration as <unit#, ce#, pipe-pair#>
+
+Optional properties:
+  - qcom,support-bus-scaling : indicates whether the driver supports scaling the bus for crypto operations.
+  - qcom,support-fde : indicates whether the driver supports key management for the full disk encryption feature.
+  - qcom,support-pfe : indicates whether the driver supports key management for the per file encryption feature.
+  - qcom,no-clock-support : indicates clocks are not handled by qseecom (they could be handled by RPM)
+  - qcom,appsbl-qseecom-support : indicates whether there is qseecom support in the apps bootloader
+  - vdd-hba-supply   : handle for the fixed power regulator
+  - qcom,qsee-reentrancy-support: indicates the qsee reentrancy phase supported by the target
+  - qcom,commonlib64-loaded-by-uefi: indicates commonlib64 is already loaded by UEFI
+  - qcom,fde-key-size: indicates which FDE key size is used on the device.
+
+Example:
+	qcom,qseecom@fe806000 {
+		compatible = "qcom,qseecom";
+		reg = <0x7f00000 0x500000>;
+		reg-names = "secapp-region";
+		qcom,disk-encrypt-pipe-pair = <2>;
+		qcom,file-encrypt-pipe-pair = <0>;
+		qcom,support-multiple-ce-hw-instance;
+		qcom,hlos-num-ce-hw-instances = <2>;
+		qcom,hlos-ce-hw-instance = <1 2>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,support-fde;
+		qcom,support-pfe;
+		qcom,msm_bus,name = "qseecom-noc";
+		qcom,msm_bus,num_cases = <4>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,no-clock-support;
+		qcom,appsbl-qseecom-support;
+		qcom,fde-key-size;
+		qcom,msm_bus,vectors =
+			<55 512 0 0>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>;
+		qcom,ce-opp-freq = <100000000>;
+		vdd-hba-supply = <&gdsc_ufs>;
+	};
+
+Example: The following dts setup is the same as the example above.
+
+	qcom,qseecom@fe806000 {
+		compatible = "qcom,qseecom";
+		reg = <0x7f00000 0x500000>;
+		reg-names = "secapp-region";
+		qcom,support-fde;
+		qcom,full-disk-encrypt-info = <0 1 2>, <0 2 2>;
+		qcom,support-pfe;
+		qcom,per-file-encrypt-info = <0 1 0>, <0 2 0>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,msm_bus,name = "qseecom-noc";
+		qcom,msm_bus,num_cases = <4>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,no-clock-support;
+		qcom,appsbl-qseecom-support;
+		qcom,fde-key-size;
+		qcom,msm_bus,vectors =
+			<55 512 0 0>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>;
+		qcom,ce-opp-freq = <100000000>;
+		vdd-hba-supply = <&gdsc_ufs>;
+	};
diff --git a/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
new file mode 100644
index 0000000..ceac719
--- /dev/null
+++ b/Documentation/devicetree/bindings/spmi/qcom,spmi-pmic-arb-debug.txt
@@ -0,0 +1,63 @@
+Qualcomm Technologies, Inc. SPMI Debug Controller (PMIC Arbiter)
+
+The SPMI PMIC Arbiter is found on various QTI chips.  It is an SPMI controller
+with wrapping arbitration logic to allow for multiple on-chip devices to control
+a single SPMI master.
+
+The PMIC Arbiter debug bus is present starting at arbiter version 5.  It has
+read and write access to all PMIC peripherals regardless of ownership
+configurations.  It cannot be used on production devices because it is disabled
+by an eFuse.
+
+See spmi.txt for the generic SPMI controller binding requirements for child
+nodes.
+
+Supported Properties:
+
+- compatible
+	Usage:      required
+	Value type: <string>
+	Definition: Must be "qcom,spmi-pmic-arb-debug".
+
+- reg
+	Usage:      required
+	Value type: <prop-encoded-array>
+	Definition: List of address and size pairs.  The address of the PMIC
+		    arbiter module is required.  The address of the debug bus
+		    disabling fuse is optional.
+
+- reg-names
+	Usage:      required
+	Value type: <stringlist>
+	Definition: Address names. Must include "core" for the PMIC arbiter
+		    module and may include "fuse" for the debug bus disabling
+		    fuse.  The strings must be specified in the same order as
+		    the corresponding addresses are specified in the reg
+		    property.
+
+- #address-cells
+	Usage:      required
+	Value type: <u32>
+	Definition: Must be 2.
+
+- #size-cells
+	Usage:      required
+	Value type: <u32>
+	Definition: Must be 0.
+
+- qcom,fuse-disable-bit
+	Usage:      required if "fuse" is listed in reg-names property
+	Value type: <u32>
+	Definition: The bit within the fuse register which is set when the debug
+		    bus is not available.  Supported values are 0 to 31.
+
+Example:
+
+qcom,spmi-debug@6b22000 {
+	compatible = "qcom,spmi-pmic-arb-debug";
+	reg = <0x6b22000 0x60>, <0x7820A8 4>;
+	reg-names = "core", "fuse";
+	qcom,fuse-disable-bit = <12>;
+	#address-cells = <2>;
+	#size-cells = <0>;
+};
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index 8e5782a..e508a4f 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -4,12 +4,12 @@
 
 Required properties:
  - compatible: Should be "qcom,usb-ssphy-qmp", "qcom,usb-ssphy-qmp-v1" or
-   "qcom,usb-ssphy-qmp-v2"
+   "qcom,usb-ssphy-qmp-v2" or "qcom,usb-ssphy-qmp-dp-combo"
  - reg: Address and length of the register set for the device
    Required regs are:
    "qmp_phy_base" : QMP PHY Base register set.
  - "vls_clamp_reg" : top-level CSR register to be written to enable phy vls
-   clamp which allows phy to detect autonomous mode.
+   clamp which allows phy to detect autonomous mode. (optional for USB DP PHY)
  - <supply-name>-supply: phandle to the regulator device tree node
    Required "supply-name" examples are:
 	"vdd" : vdd supply for SSPHY digital circuit operation
@@ -24,13 +24,28 @@
 - qcom,qmp-phy-init-seq: QMP PHY initialization sequence with reg offset, its
   value, and the delay after the register write. This is not a mandatory
   property for emulation.
  - qcom,qmp-phy-reg-offset: Provides important phy register offsets in an order
-   defined in the phy driver. Provide below mentioned register offsets in order:
+   defined in the phy driver.
+   Provide the below mentioned register offsets, in order, for a non USB DP
+   combo PHY:
    USB3_PHY_PCS_STATUS,
    USB3_PHY_AUTONOMOUS_MODE_CTRL,
    USB3_PHY_LFPS_RXTERM_IRQ_CLEAR,
    USB3_PHY_POWER_DOWN_CONTROL,
    USB3_PHY_SW_RESET,
    USB3_PHY_START
+
+   In addition to the above, the following register offsets are needed for
+   the USB DP combo PHY, in the mentioned order:
+   USB3_DP_DP_PHY_PD_CTL,
+   USB3_DP_COM_POWER_DOWN_CTRL,
+   USB3_DP_COM_SW_RESET,
+   USB3_DP_COM_RESET_OVRD_CTRL,
+   USB3_DP_COM_PHY_MODE_CTRL,
+   USB3_DP_COM_TYPEC_CTRL,
+   USB3_DP_COM_SWI_CTRL,
+   USB3_PCS_MISC_CLAMP_ENABLE
+
+   Optional register for configuring USB Type-C port select if available:
+   USB3_PHY_PCS_MISC_TYPEC_CTRL
+
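+   For example, on a non-combo PHY the property carries one cell per register,
+   in the order listed above (the offsets below are placeholders, not taken
+   from a real target):
+
+	qcom,qmp-phy-reg-offset = <0x174 0xd8 0xd4 0x604 0x600 0x608>;
+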
 - resets: reset specifier pair consists of phandle for the reset controller
   and reset lines used by this controller.
 - reset-names: reset signal name strings sorted in the same order as the resets
diff --git a/Makefile b/Makefile
index e70a1eb..e75a1d9 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 4
 PATCHLEVEL = 9
-SUBLEVEL = 20
+SUBLEVEL = 21
 EXTRAVERSION =
 NAME = Roaring Lionus
 
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index ae4b388..4616452 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -66,14 +66,14 @@
 		timer@20200 {
 			compatible = "arm,cortex-a9-global-timer";
 			reg = <0x20200 0x100>;
-			interrupts = <GIC_PPI 11 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 11 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
 		local-timer@20600 {
 			compatible = "arm,cortex-a9-twd-timer";
 			reg = <0x20600 0x100>;
-			interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_HIGH>;
+			interrupts = <GIC_PPI 13 IRQ_TYPE_EDGE_RISING>;
 			clocks = <&periph_clk>;
 		};
 
diff --git a/arch/arm/mach-bcm/bcm_5301x.c b/arch/arm/mach-bcm/bcm_5301x.c
index c8830a2..fe067f6 100644
--- a/arch/arm/mach-bcm/bcm_5301x.c
+++ b/arch/arm/mach-bcm/bcm_5301x.c
@@ -9,14 +9,42 @@
 #include <asm/hardware/cache-l2x0.h>
 
 #include <asm/mach/arch.h>
+#include <asm/siginfo.h>
+#include <asm/signal.h>
+
+#define FSR_EXTERNAL		(1 << 12)
+#define FSR_READ		(0 << 10)
+#define FSR_IMPRECISE		0x0406
 
 static const char *const bcm5301x_dt_compat[] __initconst = {
 	"brcm,bcm4708",
 	NULL,
 };
 
+static int bcm5301x_abort_handler(unsigned long addr, unsigned int fsr,
+				  struct pt_regs *regs)
+{
+	/*
+	 * We want to ignore aborts forwarded from the PCIe bus that are
+	 * expected and shouldn't really be passed by the PCIe controller.
+	 * The biggest disadvantage is the same FSR code may be reported when
+	 * reading non-existing APB register and we shouldn't ignore that.
+	 */
+	if (fsr == (FSR_EXTERNAL | FSR_READ | FSR_IMPRECISE))
+		return 0;
+
+	return 1;
+}
+
+static void __init bcm5301x_init_early(void)
+{
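+	/*
+	 * ARM FSR status 22 (16 + 6) is "imprecise external abort"; hook it
+	 * so the handler above can filter expected PCIe-originated aborts.
+	 */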
+	hook_fault_code(16 + 6, bcm5301x_abort_handler, SIGBUS, BUS_OBJERR,
+			"imprecise external abort");
+}
+
 DT_MACHINE_START(BCM5301X, "BCM5301X")
 	.l2c_aux_val	= 0,
 	.l2c_aux_mask	= ~0,
 	.dt_compat	= bcm5301x_dt_compat,
+	.init_early	= bcm5301x_init_early,
 MACHINE_END
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e46907c..33f3cc6 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1925,7 +1925,11 @@
 {
 	struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
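+	/*
+	 * The caller's offset may span whole pages: map from the page-aligned
+	 * start and return only the intra-page remainder added to the IOVA.
+	 */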
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1933,11 +1937,12 @@
 
 	prot = __dma_direction_to_prot(dir);
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi
new file mode 100644
index 0000000..a83d860
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-2800mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,ascent_2800mah {
+	/* #Ascent_860_82912_0000_2800mAh_averaged_MasterSlave_Jan11th2017*/
+	qcom,max-voltage-uv = <4350000>;
+	qcom,fg-cc-cv-threshold-mv = <4340>;
+	qcom,fastchg-current-ma = <2800>;
+	qcom,batt-id-kohm = <20>;
+	qcom,battery-beta = <3450>;
+	qcom,battery-type = "ascent_2800mah_averaged_masterslave_jan11th2017";
+	qcom,checksum = <0x0110>;
+	qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+	qcom,fg-profile-data = [
+		 21 21 F5 0D
+		 82 0B 6E 05
+		 0C 1D 5F FA
+		 74 06 97 01
+		 0E 18 F7 22
+		 A8 45 B1 52
+		 76 00 00 00
+		 0E 00 00 00
+		 00 00 3D C4
+		 6E CD 2A CB
+		 21 00 08 00
+		 28 D3 2E E5
+		 0E 06 BA F3
+		 59 E3 22 12
+		 08 E5 54 32
+		 22 06 09 20
+		 27 00 14 00
+		 4B 20 F6 04
+		 CF 0A 04 06
+		 25 1D B7 FA
+		 DD F4 BB 06
+		 FE 18 E1 22
+		 73 45 32 53
+		 5F 00 00 00
+		 0E 00 00 00
+		 00 00 D5 D5
+		 9C CC 8E D3
+		 1A 00 00 00
+		 6E EA 2E E5
+		 6E 06 A9 00
+		 6D F5 73 0B
+		 2A 02 61 1B
+		 B1 33 CC FF
+		 07 10 00 00
+		 14 0B 99 45
+		 1A 00 40 00
+		 7D 01 0A FA
+		 FF 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
new file mode 100644
index 0000000..c7cecbc
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,ascent_3450mah {
+	/* Ascent_with_connector_3450mAh_averaged_MasterSlave_Jan6th2017 */
+	qcom,max-voltage-uv = <4350000>;
+	qcom,fg-cc-cv-threshold-mv = <4340>;
+	qcom,fastchg-current-ma = <3450>;
+	qcom,batt-id-kohm = <60>;
+	qcom,battery-beta = <3435>;
+	qcom,battery-type = "ascent_3450mah_averaged_masterslave_jan6th2017";
+	qcom,checksum = <0x96AC>;
+	qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+	qcom,fg-profile-data = [
+		 9C 1F 85 05
+		 82 0A 73 FC
+		 2B 1D 72 EA
+		 EE 03 66 0C
+		 C8 17 F4 22
+		 E0 45 1F 52
+		 5C 00 00 00
+		 10 00 00 00
+		 00 00 4A C4
+		 C7 BC 48 C2
+		 0F 00 08 00
+		 E1 DA 5D ED
+		 8D FD B2 F3
+		 96 E2 A7 12
+		 7E F4 0E 3B
+		 24 06 09 20
+		 27 00 14 00
+		 83 1F EE 05
+		 1F 0A 45 FD
+		 6B 1D 53 E5
+		 EC 0B 31 14
+		 44 18 49 23
+		 18 45 A6 53
+		 55 00 00 00
+		 0E 00 00 00
+		 00 00 61 CC
+		 B7 C3 0F BC
+		 0F 00 00 00
+		 92 00 5D ED
+		 E3 06 E0 00
+		 75 FD 9C 03
+		 47 DB B3 22
+		 CB 33 CC FF
+		 07 10 00 00
+		 99 0D 99 45
+		 0F 00 40 00
+		 AB 01 0A FA
+		 FF 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-demo-6000mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-demo-6000mah.dtsi
new file mode 100644
index 0000000..1e8cd16
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-demo-6000mah.dtsi
@@ -0,0 +1,78 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,demo_6000mah {
+	qcom,max-voltage-uv = <4350000>;
+	qcom,fg-cc-cv-threshold-mv = <4340>;
+	qcom,fastchg-current-ma = <6000>;
+	qcom,batt-id-kohm = <75>;
+	qcom,battery-beta = <3435>;
+	qcom,battery-type = "Demo_battery_6000mah";
+	qcom,fg-profile-data = [
+		 2C 1F 3F FC
+		 E9 03 A1 FD
+		 58 1D FD F5
+		 27 12 2C 14
+		 3F 18 FF 22
+		 9B 45 A3 52
+		 55 00 00 00
+		 0E 00 00 00
+		 00 00 1C AC
+		 F7 CD 71 B5
+		 1A 00 0C 00
+		 3C EB 54 E4
+		 EC 05 7F FA
+		 76 05 F5 02
+		 CA F3 82 3A
+		 2A 09 40 40
+		 07 00 05 00
+		 58 1F 42 06
+		 85 03 35 F4
+		 4D 1D 37 F2
+		 23 0A 79 15
+		 B7 18 32 23
+		 26 45 72 53
+		 55 00 00 00
+		 0D 00 00 00
+		 00 00 13 CC
+		 03 00 98 BD
+		 16 00 00 00
+		 3C EB 54 E4
+		 9F FC A3 F3
+		 0F FC DF FA
+		 FF E5 A9 23
+		 CB 33 08 33
+		 07 10 00 00
+		 81 0D 99 45
+		 16 00 19 00
+		 75 01 0A FA
+		 FF 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
new file mode 100644
index 0000000..3888047
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,itech_3000mah {
+	/* #Itech_B00826LF_3000mAh_ver1660_averaged_MasterSlave_Jan10th2017*/
+	qcom,max-voltage-uv = <4350000>;
+	qcom,fg-cc-cv-threshold-mv = <4340>;
+	qcom,fastchg-current-ma = <2000>;
+	qcom,batt-id-kohm = <100>;
+	qcom,battery-beta = <3435>;
+	qcom,battery-type = "itech_b00826lf_3000mah_ver1660_jan10th2017";
+	qcom,checksum = <0xFB8F>;
+	qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+	qcom,fg-profile-data = [
+		 A4 1F 6E 05
+		 9C 0A 2B FC
+		 32 1D 23 E5
+		 60 0B 1B 15
+		 AD 17 8C 22
+		 EA 3C 89 4A
+		 5B 00 00 00
+		 12 00 00 00
+		 00 00 62 C2
+		 0C CD D8 C2
+		 19 00 08 00
+		 85 EA C7 EC
+		 E2 05 2F 01
+		 9B F5 12 12
+		 5E 05 88 3B
+		 22 06 09 20
+		 27 00 14 00
+		 7D 1F DD 05
+		 3F 0A E5 FC
+		 72 1D E3 F5
+		 6F 12 C0 1D
+		 88 18 FB 22
+		 8D 45 C6 52
+		 54 00 00 00
+		 0F 00 00 00
+		 00 00 BD CD
+		 55 C2 5D C5
+		 14 00 00 00
+		 7E 00 C7 EC
+		 60 06 BB 00
+		 59 06 61 03
+		 D9 FC 75 1B
+		 B3 33 CC FF
+		 07 10 00 00
+		 3E 0B 99 45
+		 14 00 40 00
+		 AE 01 0A FA
+		 FF 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi
new file mode 100644
index 0000000..11600ef
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi
@@ -0,0 +1,81 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+qcom,qrd_msm8998_skuk_3000mah {
+	/* QRD8997_ST1031GA_3000mAh_averaged_MasterSlave_Jan10th2017 */
+	qcom,max-voltage-uv = <4400000>;
+	qcom,fg-cc-cv-threshold-mv = <4390>;
+	qcom,fastchg-current-ma = <3000>;
+	qcom,batt-id-kohm = <68>;
+	qcom,battery-beta = <3380>;
+	qcom,battery-type = "qrd8997_st1031ga_3000mah";
+	qcom,checksum = <0xD299>;
+	qcom,gui-version = "PMI8998GUI - 2.0.0.54";
+	qcom,fg-profile-data = [
+		 70 1F B1 05
+		 6F 0A A1 FC
+		 8C 1D D7 FD
+		 C4 12 AC 1D
+		 7E 18 01 23
+		 8C 45 B6 52
+		 55 00 00 00
+		 0F 00 00 00
+		 00 00 92 C5
+		 95 CD A0 CA
+		 1F 00 08 00
+		 9F E3 C3 EC
+		 F7 FC 25 F3
+		 02 01 FF 12
+		 29 DC 1D 3A
+		 1C 06 09 20
+		 27 00 14 00
+		 AC 1F B4 05
+		 57 0A EF FC
+		 6A 1D E9 E2
+		 11 0B BB 14
+		 40 19 DC 22
+		 79 45 03 53
+		 53 00 00 00
+		 0E 00 00 00
+		 00 00 05 CC
+		 3A BB 24 CA
+		 1C 00 00 00
+		 56 F2 C3 EC
+		 A6 06 A2 F2
+		 9A 06 CC 01
+		 8C EA CF 1A
+		 BA 33 CC FF
+		 07 10 00 00
+		 3A 0C 66 46
+		 1C 00 40 00
+		 98 01 0A FA
+		 FF 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+		 00 00 00 00
+	];
+};
diff --git a/arch/arm64/boot/dts/qcom/pm8998.dtsi b/arch/arm64/boot/dts/qcom/pm8998.dtsi
index 5290f46..7c496f1 100644
--- a/arch/arm64/boot/dts/qcom/pm8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pm8998.dtsi
@@ -12,6 +12,7 @@
 
 #include <dt-bindings/spmi/spmi.h>
 #include <dt-bindings/interrupt-controller/irq.h>
+#include <dt-bindings/msm/power-on.h>
 
 &spmi_bus {
 	qcom,pm8998@0 {
@@ -42,10 +43,6 @@
 				qcom,pon-type = <0>;
 				qcom,pull-up = <1>;
 				linux,code = <116>;
-				qcom,support-reset = <1>;
-				qcom,s1-timer = <10256>;
-				qcom,s2-timer = <2000>;
-				qcom,s2-type = <1>;
 			};
 
 			qcom,pon_2 {
@@ -60,7 +57,7 @@
 				qcom,pull-up = <1>;
 				qcom,s1-timer = <6720>;
 				qcom,s2-timer = <2000>;
-				qcom,s2-type = <7>;
+				qcom,s2-type = <PON_POWER_OFF_DVDD_HARD_RESET>;
 				qcom,use-bark;
 			};
 		};
diff --git a/arch/arm64/boot/dts/qcom/pmi8998.dtsi b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
index 1f27b21..539685a 100644
--- a/arch/arm64/boot/dts/qcom/pmi8998.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi8998.dtsi
@@ -63,6 +63,175 @@
 			qcom,gpios-disallowed = <4 7 13>;
 		};
 
+		qcom,qpnp-qnovo@1500 {
+			compatible = "qcom,qpnp-qnovo";
+			reg = <0x1500 0x100>;
+			interrupts = <0x2 0x15 0x0 IRQ_TYPE_NONE>;
+			interrupt-names = "ptrain-done";
+			qcom,pmic-revid = <&pmi8998_revid>;
+		};
+
+		pmi8998_charger: qcom,qpnp-smb2 {
+			compatible = "qcom,qpnp-smb2";
+			#address-cells = <1>;
+			#size-cells = <1>;
+
+			qcom,pmic-revid = <&pmi8998_revid>;
+
+			io-channels = <&pmi8998_rradc 8>,
+				      <&pmi8998_rradc 10>,
+				      <&pmi8998_rradc 3>,
+				      <&pmi8998_rradc 4>;
+			io-channel-names = "charger_temp",
+					   "charger_temp_max",
+					   "usbin_i",
+					   "usbin_v";
+
+			qcom,boost-threshold-ua = <100000>;
+			qcom,wipower-max-uw = <5000000>;
+
+			qcom,thermal-mitigation
+					= <3000000 1500000 1000000 500000>;
+
+			qcom,chgr@1000 {
+				reg = <0x1000 0x100>;
+				interrupts =
+					<0x2 0x10 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x2 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x3 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x10 0x4 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "chg-error",
+						  "chg-state-change",
+						  "step-chg-state-change",
+						  "step-chg-soc-update-fail",
+						  "step-chg-soc-update-request";
+			};
+
+			qcom,otg@1100 {
+				reg = <0x1100 0x100>;
+				interrupts = <0x2 0x11 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x11 0x3 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "otg-fail",
+						  "otg-overcurrent",
+						  "otg-oc-dis-sw-sts",
+						  "testmode-change-detect";
+			};
+
+			qcom,bat-if@1200 {
+				reg = <0x1200 0x100>;
+				interrupts =
+					<0x2 0x12 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x12 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x12 0x5 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "bat-temp",
+						  "bat-ocp",
+						  "bat-ov",
+						  "bat-low",
+						  "bat-therm-or-id-missing",
+						  "bat-terminal-missing";
+			};
+
+			qcom,usb-chgpth@1300 {
+				reg = <0x1300 0x100>;
+				interrupts =
+					<0x2 0x13 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x13 0x5 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x13 0x6 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x13 0x7 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "usbin-collapse",
+						  "usbin-lt-3p6v",
+						  "usbin-uv",
+						  "usbin-ov",
+						  "usbin-plugin",
+						  "usbin-src-change",
+						  "usbin-icl-change",
+						  "type-c-change";
+			};
+
+			qcom,dc-chgpth@1400 {
+				reg = <0x1400 0x100>;
+				interrupts =
+					<0x2 0x14 0x0 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x1 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x14 0x6 IRQ_TYPE_EDGE_RISING>;
+
+				interrupt-names = "dcin-collapse",
+						  "dcin-lt-3p6v",
+						  "dcin-uv",
+						  "dcin-ov",
+						  "dcin-plugin",
+						  "div2-en-dg",
+						  "dcin-icl-change";
+			};
+
+			qcom,chgr-misc@1600 {
+				reg = <0x1600 0x100>;
+				interrupts =
+					<0x2 0x16 0x0 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x16 0x1 IRQ_TYPE_EDGE_RISING>,
+					<0x2 0x16 0x2 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x3 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x4 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x5 IRQ_TYPE_EDGE_BOTH>,
+					<0x2 0x16 0x6 IRQ_TYPE_EDGE_FALLING>,
+					<0x2 0x16 0x7 IRQ_TYPE_EDGE_BOTH>;
+
+				interrupt-names = "wdog-snarl",
+						  "wdog-bark",
+						  "aicl-fail",
+						  "aicl-done",
+						  "high-duty-cycle",
+						  "input-current-limiting",
+						  "temperature-change",
+						  "switcher-power-ok";
+			};
+		};
+
+		pmi8998_pdphy: qcom,usb-pdphy@1700 {
+			compatible = "qcom,qpnp-pdphy";
+			reg = <0x1700 0x100>;
+			vdd-pdphy-supply = <&pm8998_l24>;
+			vbus-supply = <&smb2_vbus>;
+			vconn-supply = <&smb2_vconn>;
+			interrupts = <0x2 0x17 0x0 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x1 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x2 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x3 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x4 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x5 IRQ_TYPE_EDGE_RISING>,
+				     <0x2 0x17 0x6 IRQ_TYPE_EDGE_RISING>;
+
+			interrupt-names = "sig-tx",
+					  "sig-rx",
+					  "msg-tx",
+					  "msg-rx",
+					  "msg-tx-failed",
+					  "msg-tx-discarded",
+					  "msg-rx-discarded";
+
+			qcom,default-sink-caps = <5000 3000>, /* 5V @ 3A */
+						 <9000 3000>, /* 9V @ 3A */
+						 <12000 2250>; /* 12V @ 2.25A */
+		};
+
 		pmi8998_rradc: rradc@4500 {
 			compatible = "qcom,rradc";
 			reg = <0x4500 0x100>;
@@ -71,6 +240,70 @@
 			#io-channel-cells = <1>;
 			qcom,pmic-revid = <&pmi8998_revid>;
 		};
+
+		pmi8998_fg: qpnp,fg {
+			compatible = "qcom,fg-gen3";
+			#address-cells = <1>;
+			#size-cells = <1>;
+			qcom,pmic-revid = <&pmi8998_revid>;
+			io-channels = <&pmi8998_rradc 0>;
+			io-channel-names = "rradc_batt_id";
+			qcom,rradc-base = <0x4500>;
+			qcom,fg-esr-timer-awake = <96>;
+			qcom,fg-esr-timer-asleep = <256>;
+			qcom,cycle-counter-en;
+			status = "okay";
+
+			qcom,fg-batt-soc@4000 {
+				status = "okay";
+				reg = <0x4000 0x100>;
+				interrupts = <0x2 0x40 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x2
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x3
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x5
+							IRQ_TYPE_EDGE_RISING>,
+					     <0x2 0x40 0x6 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x40 0x7 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "soc-update",
+						  "soc-ready",
+						  "bsoc-delta",
+						  "msoc-delta",
+						  "msoc-low",
+						  "msoc-empty",
+						  "msoc-high",
+						  "msoc-full";
+			};
+
+			qcom,fg-batt-info@4100 {
+				status = "okay";
+				reg = <0x4100 0x100>;
+				interrupts = <0x2 0x41 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x2 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x3 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x41 0x6 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "vbatt-pred-delta",
+						  "vbatt-low",
+						  "esr-delta",
+						  "batt-missing",
+						  "batt-temp-delta";
+			};
+
+			qcom,fg-memif@4400 {
+				status = "okay";
+				reg = <0x4400 0x100>;
+				interrupts = <0x2 0x44 0x0 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x44 0x1 IRQ_TYPE_EDGE_BOTH>,
+					     <0x2 0x44 0x2 IRQ_TYPE_EDGE_BOTH>;
+				interrupt-names = "ima-rdy",
+						  "mem-xcp",
+						  "dma-grant";
+			};
+		};
 	};
 
 	qcom,pmi8998@3 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index cd324e6..7f9351d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -119,14 +119,74 @@
 	status = "ok";
 };
 
-&pmi8998_flash2 {
+&pmi8998_switch1 {
 	pinctrl-names = "led_enable", "led_disable";
 	pinctrl-0 = <&flash_led3_front_en>;
 	pinctrl-1 = <&flash_led3_front_dis>;
 };
 
-&pmi8998_torch2 {
-	pinctrl-names = "led_enable", "led_disable";
-	pinctrl-0 = <&flash_led3_front_en>;
-	pinctrl-1 = <&flash_led3_front_dis>;
+&pmi8998_charger {
+	qcom,batteryless-platform;
+};
+
+/ {
+	extcon_usb1: extcon_usb1 {
+		compatible = "linux,extcon-usb-gpio";
+		id-gpio = <&pmi8998_gpios 9 GPIO_ACTIVE_HIGH>;
+		vbus-gpio = <&pmi8998_gpios 8 GPIO_ACTIVE_HIGH>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb2_vbus_det_default
+			     &usb2_id_det_default>;
+	};
+
+	usb1_vbus_vreg: usb1_vbus_vreg {
+		compatible = "regulator-fixed";
+		regulator-name = "usb_vbus_vreg";
+		gpio = <&pmi8998_gpios 2 GPIO_ACTIVE_HIGH>;
+		enable-active-high;
+
+		/* Typical EN-to-VBUS turn on time for NX5P1100 */
+		regulator-enable-ramp-delay = <630>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb2_vbus_boost_default>;
+	};
+
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+	};
+};
+
+&qupv3_se9_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "ok";
+};
+
+&qupv3_se3_i2c {
+	status = "ok";
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
+
+&usb1 {
+	status = "okay";
+	extcon = <&extcon_usb1>;
+	vbus_dwc3-supply = <&usb1_vbus_vreg>;
+};
+
+&qusb_phy1 {
+	status = "okay";
+};
+
+&usb_qmp_phy {
+	status = "okay";
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
index e7ff343..a3adcec 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-coresight.dtsi
@@ -476,6 +476,16 @@
 			};
 
 			port@2 {
+				reg = <2>;
+				funnel_in2_in_funnel_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&funnel_modem_out_funnel_in2>;
+				};
+
+			};
+
+			port@3 {
 				reg = <5>;
 				funnel_in2_in_funnel_apss_merg: endpoint {
 					slave-mode;
@@ -495,12 +505,17 @@
 		coresight-name = "coresight-tpda";
 
 		qcom,tpda-atid = <65>;
-		qcom,bc-elem-size = <13 32>;
-		qcom,tc-elem-size = <7 32>,
+		qcom,bc-elem-size = <10 32>,
 				    <13 32>;
-		qcom,dsb-elem-size = <13 32>;
-		qcom,cmb-elem-size = <7 32>,
-				     <8 32>,
+		qcom,tc-elem-size = <13 32>;
+		qcom,dsb-elem-size = <0 32>,
+				     <2 32>,
+				     <3 32>,
+				     <10 32>,
+				     <11 32>,
+				     <13 32>;
+		qcom,cmb-elem-size = <3 64>,
+				     <7 64>,
 				     <13 64>;
 
 		clocks = <&clock_gcc RPMH_QDSS_CLK>,
@@ -520,6 +535,33 @@
 			};
 
 			port@1 {
+				reg = <0>;
+				tpda_in_tpdm_center: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_center_out_tpda>;
+				};
+			};
+
+			port@2 {
+				reg = <2>;
+				tpda_in_funnel_dl_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_dl_mm_out_tpda>;
+				};
+			};
+
+			port@3 {
+				reg = <3>;
+				tpda_in_funnel_ddr_0: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&funnel_ddr_0_out_tpda>;
+				};
+			};
+
+			port@4 {
 				reg = <7>;
 				tpda_in_tpdm_vsense: endpoint {
 					slave-mode;
@@ -528,16 +570,25 @@
 				};
 			};
 
-			port@2 {
-				reg = <8>;
-				tpda_in_tpdm_dcc: endpoint {
+			port@5 {
+				reg = <10>;
+				tpda_in_tpdm_qm: endpoint {
 					slave-mode;
 					remote-endpoint =
-						<&tpdm_dcc_out_tpda>;
+						<&tpdm_qm_out_tpda>;
 				};
 			};
 
-			port@3 {
+			port@6 {
+				reg = <11>;
+				tpda_in_tpdm_north: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_north_out_tpda>;
+				};
+			};
+
+			port@7 {
 				reg = <13>;
 				tpda_in_tpdm_pimem: endpoint {
 					slave-mode;
@@ -548,6 +599,423 @@
 		};
 	};
 
+	funnel_modem: funnel@6832000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6832000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-modem";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_modem_out_funnel_in2: endpoint {
+					remote-endpoint =
+					    <&funnel_in2_in_funnel_modem>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_modem_in_tpda_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpda_modem_out_funnel_modem>;
+				};
+			};
+		};
+	};
+
+	tpda_modem: tpda@6831000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x6831000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-modem";
+
+		qcom,tpda-atid = <67>;
+		qcom,dsb-elem-size = <0 32>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_modem_out_funnel_modem: endpoint {
+					remote-endpoint =
+						<&funnel_modem_in_tpda_modem>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_modem_in_tpdm_modem: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_modem_out_tpda_modem>;
+				};
+			};
+		};
+	};
+
+	tpdm_modem: tpdm@6830000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6830000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-modem";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_modem_out_tpda_modem: endpoint {
+				remote-endpoint = <&tpda_modem_in_tpdm_modem>;
+			};
+		};
+	};
+
+	tpdm_center: tpdm@6c28000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6c28000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-center";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_center_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_center>;
+			};
+		};
+	};
+
+	tpdm_north: tpdm@6a24000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6a24000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-north";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_north_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_north>;
+			};
+		};
+	};
+
+	tpdm_qm: tpdm@69d0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x69d0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-qm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_qm_out_tpda: endpoint {
+				remote-endpoint = <&tpda_in_tpdm_qm>;
+			};
+		};
+	};
+
+	tpda_apss: tpda@7862000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x7862000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-apss";
+
+		qcom,tpda-atid = <66>;
+		qcom,dsb-elem-size = <0 32>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_apss_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					       <&funnel_apss_merg_in_tpda_apss>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_apss_in_tpdm_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+						<&tpdm_apss_out_tpda_apss>;
+				};
+			};
+		};
+	};
+
+	tpdm_apss: tpdm@7860000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x7860000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-apss";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_apss_out_tpda_apss: endpoint {
+				remote-endpoint = <&tpda_apss_in_tpdm_apss>;
+			};
+		};
+	};
+
+	tpda_llm_silver: tpda@78c0000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x78c0000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-llm-silver";
+
+		qcom,tpda-atid = <72>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_llm_silver_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					<&funnel_apss_merg_in_tpda_llm_silver>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_llm_silver_in_tpdm_llm_silver: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&tpdm_llm_silver_out_tpda_llm_silver>;
+				};
+			};
+		};
+	};
+
+	tpdm_llm_silver: tpdm@78a0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x78a0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-llm-silver";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_llm_silver_out_tpda_llm_silver: endpoint {
+				remote-endpoint =
+					<&tpda_llm_silver_in_tpdm_llm_silver>;
+			};
+		};
+	};
+
+	tpda_llm_gold: tpda@78d0000 {
+		compatible = "qcom,coresight-tpda";
+		reg = <0x78d0000 0x1000>;
+		reg-names = "tpda-base";
+
+		coresight-name = "coresight-tpda-llm-gold";
+
+		qcom,tpda-atid = <73>;
+		qcom,cmb-elem-size = <0 64>;
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+			port@0 {
+				reg = <0>;
+				tpda_llm_gold_out_funnel_apss_merg: endpoint {
+					remote-endpoint =
+					  <&funnel_apss_merg_in_tpda_llm_gold>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				tpda_llm_gold_in_tpdm_llm_gold: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&tpdm_llm_gold_out_tpda_llm_gold>;
+				};
+			};
+		};
+	};
+
+	tpdm_llm_gold: tpdm@78b0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x78b0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-llm-gold";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_llm_gold_out_tpda_llm_gold: endpoint {
+				remote-endpoint =
+					<&tpda_llm_gold_in_tpdm_llm_gold>;
+			};
+		};
+	};
+
+	funnel_dl_mm: funnel@6c0b000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x6c0b000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-dl-mm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_dl_mm_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_dl_mm>;
+				};
+			};
+
+			port@1 {
+				reg = <1>;
+				funnel_dl_mm_in_tpdm_mm: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_mm_out_funnel_dl_mm>;
+				};
+			};
+		};
+	};
+
+	tpdm_mm: tpdm@6c08000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x6c08000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-mm";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_mm_out_funnel_dl_mm: endpoint {
+				remote-endpoint = <&funnel_dl_mm_in_tpdm_mm>;
+			};
+		};
+	};
+
+	funnel_ddr_0: funnel@69e2000 {
+		compatible = "arm,primecell";
+		arm,primecell-periphid = <0x0003b908>;
+
+		reg = <0x69e2000 0x1000>;
+		reg-names = "funnel-base";
+
+		coresight-name = "coresight-funnel-ddr-0";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "apb_pclk", "core_a_clk";
+
+		ports {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
+			port@0 {
+				reg = <0>;
+				funnel_ddr_0_out_tpda: endpoint {
+					remote-endpoint =
+					    <&tpda_in_funnel_ddr_0>;
+				};
+			};
+
+			port@1 {
+				reg = <0>;
+				funnel_ddr_0_in_tpdm_ddr: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpdm_ddr_out_funnel_ddr_0>;
+				};
+			};
+		};
+	};
+
+	tpdm_ddr: tpdm@69e0000 {
+		compatible = "qcom,coresight-tpdm";
+		reg = <0x69e0000 0x1000>;
+		reg-names = "tpdm-base";
+
+		coresight-name = "coresight-tpdm-ddr";
+
+		clocks = <&clock_gcc RPMH_QDSS_CLK>,
+			 <&clock_gcc RPMH_QDSS_A_CLK>;
+		clock-names = "core_clk", "core_a_clk";
+
+		port {
+			tpdm_ddr_out_funnel_ddr_0: endpoint {
+				remote-endpoint = <&funnel_ddr_0_in_tpdm_ddr>;
+			};
+		};
+	};
+
 	tpdm_pimem: tpdm@6850000 {
 		compatible = "qcom,coresight-tpdm";
 		reg = <0x6850000 0x1000>;
@@ -566,25 +1034,6 @@
 		};
 	};
 
-
-	tpdm_dcc: tpdm@6870000 {
-		compatible = "qcom,coresight-tpdm";
-		reg = <0x6870000 0x1000>;
-		reg-names = "tpdm-base";
-
-		coresight-name = "coresight-tpdm-dcc";
-
-		clocks = <&clock_gcc RPMH_QDSS_CLK>,
-			 <&clock_gcc RPMH_QDSS_A_CLK>;
-		clock-names = "core_clk", "core_a_clk";
-
-		port {
-			tpdm_dcc_out_tpda: endpoint {
-				remote-endpoint = <&tpda_in_tpdm_dcc>;
-			};
-		};
-	};
-
 	tpdm_vsense: tpdm@6840000 {
 		compatible = "qcom,coresight-tpdm";
 		reg = <0x6840000 0x1000>;
@@ -1129,13 +1578,40 @@
 			};
 
 			port@2 {
-				reg = <1>;
+				reg = <2>;
 				funnel_apss_merg_in_tpda_olc: endpoint {
 					slave-mode;
 					remote-endpoint =
 					    <&tpda_olc_out_funnel_apss_merg>;
 				};
 			};
+
+			port@3 {
+				reg = <4>;
+				funnel_apss_merg_in_tpda_apss: endpoint {
+					slave-mode;
+					remote-endpoint =
+					    <&tpda_apss_out_funnel_apss_merg>;
+				};
+			};
+
+			port@4 {
+				reg = <5>;
+				funnel_apss_merg_in_tpda_llm_silver: endpoint {
+					slave-mode;
+					remote-endpoint =
+					<&tpda_llm_silver_out_funnel_apss_merg>;
+				};
+			};
+
+			port@5 {
+				reg = <6>;
+				funnel_apss_merg_in_tpda_llm_gold: endpoint {
+					slave-mode;
+					remote-endpoint =
+					  <&tpda_llm_gold_out_funnel_apss_merg>;
+				};
+			};
 		};
 	};
 
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index a6efb50..3f2317a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -256,6 +256,9 @@
 				"axi_clk", "memnoc_clk";
 
 		qcom,gmu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
 			compatible = "qcom,gmu-pwrlevels";
 
 			qcom,gmu-pwrlevel@0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index 6d61506..f7da384 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -104,14 +104,67 @@
 	status = "ok";
 };
 
-&pmi8998_flash2 {
+&pmi8998_switch1 {
 	pinctrl-names = "led_enable", "led_disable";
 	pinctrl-0 = <&flash_led3_front_en>;
 	pinctrl-1 = <&flash_led3_front_dis>;
 };
 
-&pmi8998_torch2 {
-	pinctrl-names = "led_enable", "led_disable";
-	pinctrl-0 = <&flash_led3_front_en>;
-	pinctrl-1 = <&flash_led3_front_dis>;
+/ {
+	mtp_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
+		#include "fg-gen3-batterydata-ascent-3450mah.dtsi"
+		#include "fg-gen3-batterydata-demo-6000mah.dtsi"
+	};
+
+	extcon_usb1: extcon_usb1 {
+		compatible = "linux,extcon-usb-gpio";
+		vbus-gpio = <&pmi8998_gpios 8 GPIO_ACTIVE_HIGH>;
+
+		pinctrl-names = "default";
+		pinctrl-0 = <&usb2_vbus_det_default>;
+	};
+};
+
+&pmi8998_fg {
+	qcom,battery-data = <&mtp_batterydata>;
+};
+
+/ {
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+	};
+};
+
+&qupv3_se9_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "ok";
+};
+
+&qupv3_se3_i2c {
+	status = "ok";
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
+
+&usb1 {
+	status = "okay";
+	extcon = <&extcon_usb1>;
+};
+
+&qusb_phy1 {
+	status = "okay";
+};
+
+&usb_qmp_phy {
+	status = "okay";
 };
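NOTE: extcon_usb1 above routes a PMI8998 GPIO (debounced through the
usb2_vbus_det_default pinctrl state added in sdm845-pinctrl.dtsi below) into
the stock extcon-usb-gpio driver, and the &usb1 override consumes it through
its "extcon" phandle. A hedged sketch of the consumer side, using the
standard extcon notifier API (the callback body is illustrative only):

	#include <linux/extcon.h>
	#include <linux/notifier.h>

	static int vbus_notifier(struct notifier_block *nb,
				 unsigned long event, void *ptr)
	{
		/* event is nonzero when EXTCON_USB attaches (VBUS present) */
		return NOTIFY_OK;
	}

	static struct notifier_block vbus_nb = {
		.notifier_call = vbus_notifier,
	};

	/* in probe(), index 0 picks the first "extcon" phandle:
	 *	edev = extcon_get_edev_by_phandle(dev, 0);
	 *	extcon_register_notifier(edev, EXTCON_USB, &vbus_nb);
	 */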
diff --git a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
index f300684..a5ed9aa 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-pinctrl.dtsi
@@ -70,16 +70,28 @@
 			flash_led3_front_en: flash_led3_front_en {
 				mux {
 					pins = "gpio21";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio21";
 					drive_strength = <2>;
 					output-high;
+					bias-disable;
 				};
 			};
 
 			flash_led3_front_dis: flash_led3_front_dis {
 				mux {
 					pins = "gpio21";
+					function = "gpio";
+				};
+
+				config {
+					pins = "gpio21";
 					drive_strength = <2>;
 					output-low;
+					bias-disable;
 				};
 			};
 		};
@@ -2317,3 +2329,34 @@
 		};
 	};
 };
+
+&pmi8998_gpios {
+	usb2_vbus_boost {
+		usb2_vbus_boost_default: usb2_vbus_boost_default {
+			pins = "gpio2";
+			function = "normal";
+			output-low;
+			power-source = <0>;
+		};
+	};
+
+	usb2_vbus_det {
+		usb2_vbus_det_default: usb2_vbus_det_default {
+			pins = "gpio8";
+			function = "normal";
+			input-enable;
+			bias-pull-down;
+			power-source = <1>;	/* VPH input supply */
+		};
+	};
+
+	usb2_id_det {
+		usb2_id_det_default: usb2_id_det_default {
+			pins = "gpio9";
+			function = "normal";
+			input-enable;
+			bias-pull-up;
+			power-source = <0>;
+		};
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
index 6ea92ee..1d5bf3a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-qrd.dtsi
@@ -9,3 +9,23 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+/{
+	qrd_batterydata: qcom,battery-data {
+		qcom,batt-id-range-pct = <15>;
+		#include "fg-gen3-batterydata-itech-3000mah.dtsi"
+		#include "fg-gen3-batterydata-ascent-3450mah.dtsi"
+	};
+};
+
+&pmi8998_fg {
+	qcom,battery-data = <&qrd_batterydata>;
+};
+
+&soc {
+	sound-tavil {
+		qcom,wsa-max-devs = <1>;
+		qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
+		qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrRight";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
index 540f82f..9a5cb3b 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-regulator.dtsi
@@ -89,9 +89,9 @@
 				qcom,cpr-corner-fmax-map = <6 12 17>;
 
 				qcom,cpr-voltage-ceiling =
-					<688000  688000  688000  688000  688000
-					 688000  756000  756000  756000  812000
-					 812000  812000  872000  872000  872000
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  872000  872000
+					 872000  872000  872000  872000  872000
 					 872000  928000>;
 
 				qcom,cpr-voltage-floor =
@@ -114,9 +114,23 @@
 					1286400000 1363200000 1440000000
 					1516800000 1593600000>;
 
+				qcom,cpr-ro-scaling-factor =
+					<2594 2795 2576 2761 2469 2673 2198
+					 2553 3188 3255 3191 2962 3055 2984
+					 2043 2947>,
+					<2594 2795 2576 2761 2469 2673 2198
+					 2553 3188 3255 3191 2962 3055 2984
+					 2043 2947>,
+					<2259 2389 2387 2531 2294 2464 2218
+					 2476 2525 2855 2817 2836 2740 2490
+					 1950 2632>;
+
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
 					<100000 100000 100000>;
 
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
@@ -144,8 +158,8 @@
 				qcom,cpr-corner-fmax-map = <4 7 9>;
 
 				qcom,cpr-voltage-ceiling =
-					<688000  688000  688000  688000  756000
-					 812000  812000  872000  928000>;
+					<872000  872000  872000  872000  872000
+					 872000  872000  872000  928000>;
 
 				qcom,cpr-voltage-floor =
 					<568000  568000  568000  568000  568000
@@ -160,9 +174,23 @@
 					 576000000  652800000  729600000
 					 806400000  883200000  960000000>;
 
+				qcom,cpr-ro-scaling-factor =
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2439 2577 2552 2667 2461 2577 2394
+					 2536 2132 2307 2191 2903 2838 2912
+					 2501 2095>;
+
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
 					<100000 100000 100000>;
 
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
@@ -233,9 +261,9 @@
 					<10 17 22>;
 
 				qcom,cpr-voltage-ceiling =
-					<756000  756000  756000  756000  756000
-					 756000  756000  756000  756000  756000
-					 812000  812000  828000  828000  828000
+					<828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
+					 828000  828000  828000  828000  828000
 					 828000  828000  884000  952000  952000
 					1056000 1056000>;
 
@@ -263,9 +291,23 @@
 					1728000000 1804800000 1881600000
 					1958400000>;
 
+				qcom,cpr-ro-scaling-factor =
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2857 3056 2828 2952 2699 2796 2447
+					 2631 2630 2579 2244 3343 3287 3137
+					 3164 2656>,
+					<2086 2208 2273 2408 2203 2327 2213
+					 2340 1755 2039 2049 2474 2437 2618
+					 2003 1675>;
+
 				qcom,cpr-open-loop-voltage-fuse-adjustment =
 					<100000 100000 100000>;
 
+				qcom,cpr-closed-loop-voltage-fuse-adjustment =
+					<100000 100000 100000>;
+
 				qcom,allow-voltage-interpolation;
 				qcom,allow-quotient-interpolation;
 				qcom,cpr-scaled-open-loop-voltage-as-ceiling;
@@ -417,6 +459,7 @@
 			regulator-max-microvolt = <1200000>;
 			qcom,init-voltage = <1200000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
+			regulator-always-on;
 		};
 	};
 
@@ -517,7 +560,7 @@
 			regulator-name = "pm8998_l8";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1200000>;
-			regulator-max-microvolt = <1200000>;
+			regulator-max-microvolt = <1248000>;
 			qcom,init-voltage = <1200000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
@@ -534,9 +577,9 @@
 		pm8998_l9: regulator-l9 {
 			regulator-name = "pm8998_l9";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <1808000>;
-			regulator-max-microvolt = <2960000>;
-			qcom,init-voltage = <1808000>;
+			regulator-min-microvolt = <1704000>;
+			regulator-max-microvolt = <2928000>;
+			qcom,init-voltage = <1704000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -552,9 +595,9 @@
 		pm8998_l10: regulator-l10 {
 			regulator-name = "pm8998_l10";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <1808000>;
-			regulator-max-microvolt = <2960000>;
-			qcom,init-voltage = <1808000>;
+			regulator-min-microvolt = <1704000>;
+			regulator-max-microvolt = <2928000>;
+			qcom,init-voltage = <1704000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -571,7 +614,7 @@
 			regulator-name = "pm8998_l11";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1000000>;
-			regulator-max-microvolt = <1000000>;
+			regulator-max-microvolt = <1048000>;
 			qcom,init-voltage = <1000000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
@@ -606,9 +649,9 @@
 		pm8998_l13: regulator-l13 {
 			regulator-name = "pm8998_l13";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <1808000>;
+			regulator-min-microvolt = <1800000>;
 			regulator-max-microvolt = <2960000>;
-			qcom,init-voltage = <1808000>;
+			qcom,init-voltage = <1800000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -625,7 +668,7 @@
 			regulator-name = "pm8998_l14";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <1800000>;
-			regulator-max-microvolt = <1800000>;
+			regulator-max-microvolt = <1880000>;
 			qcom,init-voltage = <1800000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
@@ -697,7 +740,7 @@
 			regulator-name = "pm8998_l18";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2704000>;
-			regulator-max-microvolt = <2704000>;
+			regulator-max-microvolt = <2960000>;
 			qcom,init-voltage = <2704000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
@@ -714,9 +757,9 @@
 		pm8998_l19: regulator-l19 {
 			regulator-name = "pm8998_l19";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <3008000>;
-			regulator-max-microvolt = <3008000>;
-			qcom,init-voltage = <3008000>;
+			regulator-min-microvolt = <2856000>;
+			regulator-max-microvolt = <3104000>;
+			qcom,init-voltage = <2856000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -732,9 +775,9 @@
 		pm8998_l20: regulator-l20 {
 			regulator-name = "pm8998_l20";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <2960000>;
+			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
-			qcom,init-voltage = <2960000>;
+			qcom,init-voltage = <2704000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -750,9 +793,9 @@
 		pm8998_l21: regulator-l21 {
 			regulator-name = "pm8998_l21";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <2960000>;
+			regulator-min-microvolt = <2704000>;
 			regulator-max-microvolt = <2960000>;
-			qcom,init-voltage = <2960000>;
+			qcom,init-voltage = <2704000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -769,7 +812,7 @@
 			regulator-name = "pm8998_l22";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
 			regulator-min-microvolt = <2864000>;
-			regulator-max-microvolt = <2864000>;
+			regulator-max-microvolt = <3312000>;
 			qcom,init-voltage = <2864000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
@@ -786,9 +829,9 @@
 		pm8998_l23: regulator-l23 {
 			regulator-name = "pm8998_l23";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <3312000>;
+			regulator-min-microvolt = <3000000>;
 			regulator-max-microvolt = <3312000>;
-			qcom,init-voltage = <3312000>;
+			qcom,init-voltage = <3000000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -823,9 +866,9 @@
 		pm8998_l25: regulator-l25 {
 			regulator-name = "pm8998_l25";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <3104000>;
-			regulator-max-microvolt = <3104000>;
-			qcom,init-voltage = <3104000>;
+			regulator-min-microvolt = <3000000>;
+			regulator-max-microvolt = <3312000>;
+			qcom,init-voltage = <3000000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -872,9 +915,9 @@
 		pm8998_l28: regulator-l28 {
 			regulator-name = "pm8998_l28";
 			qcom,set = <RPMH_REGULATOR_SET_ALL>;
-			regulator-min-microvolt = <3008000>;
+			regulator-min-microvolt = <2856000>;
 			regulator-max-microvolt = <3008000>;
-			qcom,init-voltage = <3008000>;
+			qcom,init-voltage = <2856000>;
 			qcom,init-mode = <RPMH_REGULATOR_MODE_LDO_LPM>;
 		};
 	};
@@ -955,3 +998,13 @@
 		};
 	};
 };
+
+&pmi8998_charger {
+	smb2_vbus: qcom,smb2-vbus {
+		regulator-name = "smb2-vbus";
+	};
+
+	smb2_vconn: qcom,smb2-vconn {
+		regulator-name = "smb2-vconn";
+	};
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index 0f31c0a..be41858 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -16,7 +16,6 @@
 
 #include "sdm845.dtsi"
 #include "sdm845-rumi.dtsi"
-#include "sdm845-usb.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 RUMI";
 	compatible = "qcom,sdm845-rumi", "qcom,sdm845", "qcom,rumi";
@@ -28,32 +27,3 @@
 		status = "disabled";
 	};
 };
-
-&usb0 {
-	/delete-property/ qcom,usb-dbm;
-	qcom,charging-disabled;
-	dwc3@a600000 {
-		maximum-speed = "high-speed";
-	};
-};
-
-&qusb_phy0 {
-	reg = <0x088e2000 0x4>,
-	      <0x0a720000 0x9500>;
-	reg-names = "qusb_phy_base",
-		"emu_phy_base";
-	qcom,emulation;
-	qcom,emu-init-seq = <0x19 0x1404
-			     0x20 0x1414
-			     0x79 0x1410
-			     0x00 0x1418
-			     0x99 0x1404
-			     0x04 0x1408
-			     0xd9 0x1404>;
-
-	qcom,emu-dcm-reset-seq = <0x5 0x14	/* 0x1 0x14 for E1.2 */
-				  0x100000 0x20
-				  0x0 0x20
-				  0x1a0 0x20	/* 0x220 0x20 for E1.2 */
-				  0x80 0x28>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 80f34bf..3ec83f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -147,3 +147,37 @@
 	spm-level = <0>;
 	status = "ok";
 };
+
+&pmi8998_charger {
+	qcom,suspend-input;
+};
+
+&usb0 {
+	/delete-property/ qcom,usb-dbm;
+	extcon = <0>, <0>, <&eud>;
+	qcom,charging-disabled;
+	dwc3@a600000 {
+		maximum-speed = "high-speed";
+	};
+};
+
+&qusb_phy0 {
+	reg = <0x088e2000 0x4>,
+	      <0x0a720000 0x9500>;
+	reg-names = "qusb_phy_base",
+		"emu_phy_base";
+	qcom,emulation;
+	qcom,emu-init-seq = <0x19 0x1404
+			     0x20 0x1414
+			     0x79 0x1410
+			     0x00 0x1418
+			     0x99 0x1404
+			     0x04 0x1408
+			     0xd9 0x1404>;
+
+	qcom,emu-dcm-reset-seq = <0x5 0x14	/* 0x1 0x14 for E1.2 */
+				  0x100000 0x20
+				  0x0 0x20
+				  0x1a0 0x20	/* 0x220 0x20 for E1.2 */
+				  0x80 0x28>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index ab4c253..3e00577 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -13,21 +13,27 @@
 &soc {
 	mdss_mdp: qcom,mdss_mdp@ae00000 {
 		compatible = "qcom,sde-kms";
-		reg = <0x0ae00000 0x81a24>,
+		reg = <0x0ae00000 0x81d40>,
 		      <0x0aeb0000 0x2008>;
 		reg-names = "mdp_phys",
 			"vbif_phys";
 
-		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+		clocks =
+			<&clock_gcc GCC_DISP_AHB_CLK>,
+			<&clock_gcc GCC_DISP_AXI_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_MDP_CLK_SRC>,
-			<&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
-		clock-names = "iface_clk", "bus_clk",
-			"core_clk_src", "core_clk";
-		clock-rate = <0 0 300000000 300000000>;
-		clock-max-rate = <0 0 430000000 430000000>;
+			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK_SRC>,
+			<&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+		clock-names = "gcc_iface", "gcc_bus",
+			"iface_clk", "bus_clk", "core_clk_src",
+			"vsync_clk_src", "core_clk", "vsync_clk";
+		clock-rate = <0 0 0 0 300000000 0 300000000 0 0>;
+		clock-max-rate = <0 0 0 0 430000000 0 430000000 0 0>;
 
-		mdp-vdd-supply = <&mdss_core_gdsc>;
+		sde-vdd-supply = <&mdss_core_gdsc>;
 
 		/* interrupt config */
 		interrupt-parent = <&intc>;
@@ -142,7 +148,7 @@
 
 			qcom,platform-supply-entry@0 {
 				reg = <0>;
-				qcom,supply-name = "mdp-vdd";
+				qcom,supply-name = "sde-vdd";
 				qcom,supply-min-voltage = <0>;
 				qcom,supply-max-voltage = <0>;
 				qcom,supply-enable-load = <0>;
@@ -184,6 +190,11 @@
 		qcom,sde-rsc-version = <1>;
 
 		vdd-supply = <&mdss_core_gdsc>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>;
+		clock-names = "iface_clk", "vsync_clk";
+		clock-rate = <0 0>;
+
 		qcom,sde-dram-channels = <2>;
 
 		/* data and reg bus scale settings */
@@ -357,18 +368,17 @@
 		reg-names = "dsi_phy";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-1p2-supply = <&pm8998_l26>;
-		qcom,platform-strength-ctrl = [ff 06
-						ff 06
-						ff 06
-						ff 00];
-		qcom,platform-regulator-settings = [1d
-							1d 1d 1d 1d];
-		qcom,platform-lane-config = [00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 8f];
-
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
 		qcom,phy-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
@@ -392,18 +402,17 @@
 		reg-names = "dsi_phy";
 		gdsc-supply = <&mdss_core_gdsc>;
 		vdda-1p2-supply = <&pm8998_l26>;
-		qcom,platform-strength-ctrl = [ff 06
-						ff 06
-						ff 06
-						ff 00];
-		qcom,platform-regulator-settings = [1d
-							1d 1d 1d 1d];
-		qcom,platform-lane-config = [00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 0f
-						00 00 10 8f];
-
+		qcom,platform-strength-ctrl = [55 03
+						55 03
+						55 03
+						55 03
+						55 00];
+		qcom,platform-regulator-settings = [1d 1d 1d 1d 1d];
+		qcom,platform-lane-config = [00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 00
+						00 00 00 80];
 		qcom,phy-supply-entries {
 			#address-cells = <1>;
 			#size-cells = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
index 0f94d812..a03148d 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sim.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -9,3 +9,7 @@
  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  * GNU General Public License for more details.
  */
+
+&pmi8998_charger {
+	qcom,suspend-input;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
index a75b6a7..7b8b425 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-smp2p.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -307,4 +307,27 @@
 		interrupt-controller;
 		#interrupt-cells = <2>;
 	};
+
+	/* ipa - outbound entry to mss */
+	smp2pgpio_ipa_1_out: qcom,smp2pgpio-ipa-1-out {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "ipa";
+		qcom,remote-pid = <1>;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
+
+	/* ipa - inbound entry from mss */
+	smp2pgpio_ipa_1_in: qcom,smp2pgpio-ipa-1-in {
+		compatible = "qcom,smp2pgpio";
+		qcom,entry-name = "ipa";
+		qcom,remote-pid = <1>;
+		qcom,is-inbound;
+		gpio-controller;
+		#gpio-cells = <2>;
+		interrupt-controller;
+		#interrupt-cells = <2>;
+	};
 };
diff --git a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
index 1c66f89..7c310cd 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-usb.dtsi
@@ -19,8 +19,6 @@
 		reg = <0x0a600000 0xf8c00>,
 		      <0x088ee000 0x400>;
 		reg-names = "core_base", "ahb2phy_base";
-		iommus = <&apps_smmu 0x740>;
-		qcom,smmu-s1-bypass;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
@@ -32,7 +30,7 @@
 		qcom,usb-dbm = <&dbm_1p5>;
 		qcom,dwc-usb3-msm-tx-fifo-size = <21288>;
 		qcom,num-gsi-evt-buffs = <0x3>;
-		extcon = <0>, <0>, <&eud>;
+		extcon = <&pmi8998_pdphy>, <&pmi8998_pdphy>, <&eud>;
 
 		clocks = <&clock_gcc GCC_USB30_PRIM_MASTER_CLK>,
 			 <&clock_gcc GCC_CFG_NOC_USB3_PRIM_AXI_CLK>,
@@ -64,6 +62,38 @@
 			snps,hird-threshold = /bits/ 8 <0x10>;
 			maximum-speed = "high-speed";
 		};
+
+		qcom,usbbam@a704000 {
+			compatible = "qcom,usb-bam-msm";
+			reg = <0xa704000 0x17000>;
+			interrupt-parent = <&intc>;
+			interrupts = <0 132 0>;
+
+			qcom,bam-type = <0>;
+			qcom,usb-bam-fifo-baseaddr = <0x146bb000>;
+			qcom,usb-bam-num-pipes = <8>;
+			qcom,ignore-core-reset-ack;
+			qcom,disable-clk-gating;
+			qcom,usb-bam-override-threshold = <0x4001>;
+			qcom,usb-bam-max-mbps-highspeed = <400>;
+			qcom,usb-bam-max-mbps-superspeed = <3600>;
+			qcom,reset-bam-on-connect;
+
+			qcom,pipe0 {
+				label = "ssusb-qdss-in-0";
+				qcom,usb-bam-mem-type = <2>;
+				qcom,dir = <1>;
+				qcom,pipe-num = <0>;
+				qcom,peer-bam = <0>;
+				qcom,peer-bam-physical-address = <0x6064000>;
+				qcom,src-bam-pipe-index = <0>;
+				qcom,dst-bam-pipe-index = <0>;
+				qcom,data-fifo-offset = <0x0>;
+				qcom,data-fifo-size = <0x1800>;
+				qcom,descriptor-fifo-offset = <0x1800>;
+				qcom,descriptor-fifo-size = <0x800>;
+			};
+		};
 	};
 
 	/* Primary USB port related QUSB2 PHY */
@@ -106,9 +136,155 @@
 		reset-names = "phy_reset";
 	};
 
-	dbm_1p5: dbm@a8f8000 {
+	/* Primary USB port related QMP USB DP Combo PHY */
+	usb_qmp_dp_phy: ssphy@88e8000 {
+		compatible = "qcom,usb-ssphy-qmp-dp-combo";
+		reg = <0x88e8000 0x3000>;
+		reg-names = "qmp_phy_base";
+
+		vdd-supply = <&pm8998_l1>;
+		core-supply = <&pm8998_l26>;
+		qcom,vdd-voltage-level = <0 880000 880000>;
+		qcom,vbus-valid-override;
+		qcom,qmp-phy-init-seq =
+		/* <reg_offset, value, delay> */
+			<0x1048 0x07 0x00 /* COM_PLL_IVCO */
+			 0x1080 0x14 0x00 /* COM_SYSCLK_EN_SEL */
+			 0x1034 0x08 0x00 /* COM_BIAS_EN_CLKBUFLR_EN */
+			 0x1137 0x30 0x00 /* COM_CLK_SELECT */
+			 0x103c 0x02 0x00 /* COM_SYS_CLK_CTRL */
+			 0x108c 0x08 0x00 /* COM_RESETSM_CNTRL2 */
+			 0x115c 0x16 0x00 /* COM_CMN_CONFIG */
+			 0x1164 0x01 0x00 /* COM_SVS_MODE_CLK_SEL */
+			 0x113c 0x80 0x00 /* COM_HSCLK_SEL */
+			 0x10b0 0x82 0x00 /* COM_DEC_START_MODE0 */
+			 0x10b8 0xab 0x00 /* COM_DIV_FRAC_START1_MODE0 */
+			 0x10bc 0xea 0x00 /* COM_DIV_FRAC_START2_MODE0 */
+			 0x10c0 0x02 0x00 /* COM_DIV_FRAC_START3_MODE0 */
+			 0x1060 0x06 0x00 /* COM_CP_CTRL_MODE0 */
+			 0x1068 0x16 0x00 /* COM_PLL_RCTRL_MODE0 */
+			 0x1070 0x36 0x00 /* COM_PLL_CCTRL_MODE0 */
+			 0x10dc 0x00 0x00 /* COM_INTEGLOOP_GAIN1_MODE0 */
+			 0x10d8 0x3f 0x00 /* COM_INTEGLOOP_GAIN0_MODE0 */
+			 0x10f8 0x01 0x00 /* COM_VCO_TUNE2_MODE0 */
+			 0x10f4 0xc9 0x00 /* COM_VCO_TUNE1_MODE0 */
+			 0x1148 0x0a 0x00 /* COM_CORECLK_DIV_MODE0 */
+			 0x10a0 0x00 0x00 /* COM_LOCK_CMP3_MODE0 */
+			 0x109c 0x34 0x00 /* COM_LOCK_CMP2_MODE0 */
+			 0x1018 0x15 0x00 /* COM_LOCK_CMP1_MODE0 */
+			 0x1090 0x04 0x00 /* COM_LOCK_CMP_EN */
+			 0x1154 0x00 0x00 /* COM_CORE_CLK_EN */
+			 0x1094 0x00 0x00 /* COM_LOCK_CMP_CFG */
+			 0x10f0 0x00 0x00 /* COM_VCO_TUNE_MAP */
+			 0x1040 0x0a 0x00 /* COM_SYSCLK_BUF_ENABLE */
+			 0x1010 0x01 0x00 /* COM_SSC_EN_CENTER */
+			 0x101c 0x31 0x00 /* COM_SSC_PER1 */
+			 0x1020 0x01 0x00 /* COM_SSC_PER2 */
+			 0x1014 0x00 0x00 /* COM_SSC_ADJ_PER1 */
+			 0x1018 0x00 0x00 /* COM_SSC_ADJ_PER2 */
+			 0x1024 0x85 0x00 /* COM_SSC_STEP_SIZE1 */
+			 0x1028 0x07 0x00 /* COM_SSC_STEP_SIZE2 */
+			 0x1430 0x0b 0x00 /* RXA_UCDR_FASTLOCK_FO_GAIN */
+			 0x14d4 0x0f 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL2 */
+			 0x14d8 0x4e 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL3 */
+			 0x14dc 0x18 0x00 /* RXA_RX_EQU_ADAPTOR_CNTRL4 */
+			 0x14f8 0x77 0x00 /* RXA_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+			 0x14fc 0x80 0x00 /* RXA_RX_OFFSET_ADAPTOR_CNTRL2 */
+			 0x1504 0x03 0x00 /* RXA_SIGDET_CNTRL */
+			 0x150c 0x16 0x00 /* RXA_SIGDET_DEGLITCH_CNTRL */
+			 0x1830 0x0b 0x00 /* RXB_UCDR_FASTLOCK_FO_GAIN */
+			 0x18d4 0x0f 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL2 */
+			 0x18d8 0x4e 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL3 */
+			 0x18dc 0x18 0x00 /* RXB_RX_EQU_ADAPTOR_CNTRL4 */
+			 0x18f8 0x77 0x00 /* RXB_RX_EQ_OFFSET_ADAPTOR_CNTRL1 */
+			 0x18fc 0x80 0x00 /* RXB_RX_OFFSET_ADAPTOR_CNTRL2 */
+			 0x1904 0x03 0x00 /* RXB_SIGDET_CNTRL */
+			 0x190c 0x16 0x00 /* RXB_SIGDET_DEGLITCH_CNTRL */
+			 0x1260 0x10 0x00 /* TXA_HIGHZ_DRVR_EN */
+			 0x12a4 0x12 0x00 /* TXA_RCV_DETECT_LVL_2 */
+			 0x128c 0x16 0x00 /* TXA_LANE_MODE_1 */
+			 0x1648 0x09 0x00 /* TXB_RES_CODE_LANE_OFFSET_RX */
+			 0x1644 0x0d 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */
+			 0x1660 0x10 0x00 /* TXB_HIGHZ_DRVR_EN */
+			 0x16a4 0x12 0x00 /* TXB_RCV_DETECT_LVL_2 */
+			 0x168c 0x16 0x00 /* TXB_LANE_MODE_1 */
+			 0x1648 0x09 0x00 /* TXB_RES_CODE_LANE_OFFSET_RX */
+			 0x1644 0x0d 0x00 /* TXB_RES_CODE_LANE_OFFSET_TX */
+			 0x1cc8 0x83 0x00 /* PCS_FLL_CNTRL2 */
+			 0x1ccc 0x09 0x00 /* PCS_FLL_CNT_VAL_L */
+			 0x1cd0 0xa2 0x00 /* PCS_FLL_CNT_VAL_H_TOL */
+			 0x1cd4 0x40 0x00 /* PCS_FLL_MAN_CODE */
+			 0x1cc4 0x02 0x00 /* PCS_FLL_CNTRL1 */
+			 0x1c80 0xd1 0x00 /* PCS_LOCK_DETECT_CONFIG1 */
+			 0x1c84 0x1f 0x00 /* PCS_LOCK_DETECT_CONFIG2 */
+			 0x1c88 0x47 0x00 /* PCS_LOCK_DETECT_CONFIG3 */
+			 0x1c64 0x1b 0x00 /* PCS_POWER_STATE_CONFIG2 */
+			 0x1434 0x75 0x00 /* RXA_UCDR_SO_SATURATION */
+			 0x1834 0x75 0x00 /* RXB_UCDR_SO_SATURATION */
+			 0x1dd8 0xba 0x00 /* PCS_RX_SIGDET_LVL */
+			 0x1c0c 0x9f 0x00 /* PCS_TXMGN_V0 */
+			 0x1c10 0x9f 0x00 /* PCS_TXMGN_V1 */
+			 0x1c14 0xb7 0x00 /* PCS_TXMGN_V2 */
+			 0x1c18 0x4e 0x00 /* PCS_TXMGN_V3 */
+			 0x1c1c 0x65 0x00 /* PCS_TXMGN_V4 */
+			 0x1c20 0x6b 0x00 /* PCS_TXMGN_LS */
+			 0x1c24 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V0 */
+			 0x1c28 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V0 */
+			 0x1c2c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V1 */
+			 0x1c30 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V1 */
+			 0x1c34 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V2 */
+			 0x1c38 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V2 */
+			 0x1c3c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V3 */
+			 0x1c40 0x1d 0x00 /* PCS_TXDEEMPH_M3P5DB_V3 */
+			 0x1c44 0x15 0x00 /* PCS_TXDEEMPH_M6DB_V4 */
+			 0x1c48 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_V4 */
+			 0x1c4c 0x15 0x00 /* PCS_TXDEEMPH_M6DB_LS */
+			 0x1c50 0x0d 0x00 /* PCS_TXDEEMPH_M3P5DB_LS */
+			 0x1c5c 0x02 0x00 /* PCS_RATE_SLEW_CNTRL */
+			 0x1ca0 0x04 0x00 /* PCS_PWRUP_RESET_DLY_TIME_AUXCLK */
+			 0x1c8c 0x44 0x00 /* PCS_TSYNC_RSYNC_TIME */
+			 0x1c70 0xe7 0x00 /* PCS_RCVR_DTCT_DLY_P1U2_L */
+			 0x1c74 0x03 0x00 /* PCS_RCVR_DTCT_DLY_P1U2_H */
+			 0x1c78 0x40 0x00 /* PCS_RCVR_DTCT_DLY_U3_L */
+			 0x1c7c 0x00 0x00 /* PCS_RCVR_DTCT_DLY_U3_H */
+			 0x1cb8 0x75 0x00 /* PCS_RXEQTRAINING_WAIT_TIME */
+			 0x1cb0 0x86 0x00 /* PCS_LFPS_TX_ECSTART_EQTLOCK */
+			 0x1cbc 0x13 0x00 /* PCS_RXEQTRAINING_RUN_TIME */
+			 0xffffffff 0xffffffff 0x00>;
+
+		qcom,qmp-phy-reg-offset =
+				<0x1d74 /* USB3_DP_PCS_PCS_STATUS */
+				 0x1cd8 /* USB3_DP_PCS_AUTONOMOUS_MODE_CTRL */
+				 0x1cdc /* USB3_DP_PCS_LFPS_RXTERM_IRQ_CLEAR */
+				 0x1c04 /* USB3_DP_PCS_POWER_DOWN_CONTROL */
+				 0x1c00 /* USB3_DP_PCS_SW_RESET */
+				 0x1c08 /* USB3_DP_PCS_START_CONTROL */
+				 0x2a18 /* USB3_DP_DP_PHY_PD_CTL */
+				 0x0008 /* USB3_DP_COM_POWER_DOWN_CTRL */
+				 0x0004 /* USB3_DP_COM_SW_RESET */
+				 0x001c /* USB3_DP_COM_RESET_OVRD_CTRL */
+				 0x0000 /* USB3_DP_COM_PHY_MODE_CTRL */
+				 0x0010 /* USB3_DP_COM_TYPEC_CTRL */
+				 0x000c /* USB3_DP_COM_SWI_CTRL */
+				 0x1a0c>; /* USB3_DP_PCS_MISC_CLAMP_ENABLE */
+
+		clocks = <&clock_gcc GCC_USB3_PRIM_PHY_AUX_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_PIPE_CLK>,
+			 <&clock_rpmh RPMH_CXO_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_CLKREF_CLK>,
+			 <&clock_gcc GCC_USB3_PRIM_PHY_COM_AUX_CLK>;
+
+		clock-names = "aux_clk", "pipe_clk", "ref_clk_src",
+				"ref_clk", "com_aux_clk";
+
+		resets = <&clock_gcc GCC_USB3_DP_PHY_PRIM_BCR>;
+		reset-names = "phy_reset";
+		status = "disabled";
+	};
+
+	dbm_1p5: dbm@a6f8000 {
 		compatible = "qcom,usb-dbm-1p5";
-		reg = <0xa8f8000 0x400>;
+		reg = <0xa6f8000 0x400>;
 		qcom,reset-ep-after-lpm-resume;
 	};
 
@@ -122,8 +298,6 @@
 		reg = <0x0a800000 0xf8c00>,
 		      <0x088ee000 0x400>;
 		reg-names = "core_base", "ahb2phy_base";
-		iommus = <&apps_smmu 0x760>;
-		qcom,smmu-s1-bypass;
 		#address-cells = <1>;
 		#size-cells = <1>;
 		ranges;
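NOTE: the "<reg_offset, value, delay>" comment above documents the layout of
qcom,qmp-phy-init-seq: a flat u32 array of triplets, terminated by the
all-ones offset in the final entry. A hypothetical sketch of how a PHY
driver might consume it (the function name, the error handling, and the
delay unit, treated here as microseconds, are assumptions, not the QTI
implementation):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/io.h>
	#include <linux/of.h>

	static int qmp_run_init_seq(struct device_node *np, void __iomem *base)
	{
		u32 off, val, us;
		int i, len;

		len = of_property_count_u32_elems(np, "qcom,qmp-phy-init-seq");
		if (len < 0 || len % 3)
			return -EINVAL;

		for (i = 0; i < len; i += 3) {
			of_property_read_u32_index(np, "qcom,qmp-phy-init-seq",
						   i, &off);
			of_property_read_u32_index(np, "qcom,qmp-phy-init-seq",
						   i + 1, &val);
			of_property_read_u32_index(np, "qcom,qmp-phy-init-seq",
						   i + 2, &us);
			if (off == 0xffffffff)	/* sentinel ends the table */
				break;
			writel_relaxed(val, base + off);
			if (us)
				udelay(us);
		}
		return 0;
	}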
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index f7b2fc2..a89a57c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -21,6 +21,7 @@
 #include <dt-bindings/regulator/qcom,rpmh-regulator.h>
 #include <dt-bindings/interrupt-controller/arm-gic.h>
 #include <dt-bindings/soc/qcom,tcs-mbox.h>
+#include <dt-bindings/spmi/spmi.h>
 
 / {
 	model = "Qualcomm Technologies, Inc. SDM845";
@@ -308,37 +309,61 @@
 		pil_modem_mem: modem_region@8b000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x8b000000 0 0x6e00000>;
+			reg = <0 0x8b000000 0 0x7300000>;
 		};
 
-		pil_video_mem: pil_video_region@91e00000 {
+		pil_video_mem: pil_video_region@92300000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x91e00000 0 0x500000>;
+			reg = <0 0x92300000 0 0x500000>;
 		};
 
-		pil_cdsp_mem: cdsp_regions@92300000 {
+		pil_cdsp_mem: cdsp_regions@92800000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x92300000 0 0x800000>;
+			reg = <0 0x92800000 0 0x800000>;
 		};
 
-		pil_adsp_mem: pil_adsp_region@92b00000 {
+		pil_adsp_mem: pil_adsp_region@93000000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x92b00000 0 0x1a00000>;
+			reg = <0 0x93000000 0 0x1a00000>;
 		};
 
-		pil_slpi_mem: pil_slpi_region@94500000 {
+		pil_mba_mem: pil_mba_region@94a00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x94500000 0 0xf00000>;
+			reg = <0 0x94a00000 0 0x200000>;
 		};
 
-		pil_spss_mem: spss_region@95400000 {
+		pil_slpi_mem: pil_slpi_region@94c00000 {
 			compatible = "removed-dma-pool";
 			no-map;
-			reg = <0 0x95400000 0 0x700000>;
+			reg = <0 0x94c00000 0 0x1400000>;
+		};
+
+		pil_ipa_fw_mem: pil_ipa_fw_region@96000000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96000000 0 0x10000>;
+		};
+
+		pil_ipa_gsi_mem: pil_ipa_gsi_region@96010000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96010000 0 0x5000>;
+		};
+
+		pil_gpu_mem: pil_gpu_region@96015000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96015000 0 0x1000>;
+		};
+
+		pil_spss_mem: spss_region@96100000 {
+			compatible = "removed-dma-pool";
+			no-map;
+			reg = <0 0x96100000 0 0x100000>;
 		};
 
 		adsp_mem: adsp_region {
@@ -501,6 +526,57 @@
 		cell-index = <0>;
 	};
 
+	spmi_debug_bus: qcom,spmi-debug@6b22000 {
+		compatible = "qcom,spmi-pmic-arb-debug";
+		reg = <0x6b22000 0x60>, <0x7820A8 4>;
+		reg-names = "core", "fuse";
+		qcom,fuse-disable-bit = <12>;
+		#address-cells = <2>;
+		#size-cells = <0>;
+
+		qcom,pm8998-debug@0 {
+			compatible = "qcom,spmi-pmic";
+			reg = <0x0 SPMI_USID>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+		};
+
+		qcom,pm8998-debug@1 {
+			compatible = "qcom,spmi-pmic";
+			reg = <0x1 SPMI_USID>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+		};
+
+		qcom,pmi8998-debug@2 {
+			compatible = "qcom,spmi-pmic";
+			reg = <0x2 SPMI_USID>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+		};
+
+		qcom,pmi8998-debug@3 {
+			compatible = "qcom,spmi-pmic";
+			reg = <0x3 SPMI_USID>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+		};
+
+		qcom,pm8005-debug@4 {
+			compatible = "qcom,spmi-pmic";
+			reg = <0x4 SPMI_USID>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+		};
+
+		qcom,pm8005-debug@5 {
+			compatible = "qcom,spmi-pmic";
+			reg = <0x5 SPMI_USID>;
+			#address-cells = <2>;
+			#size-cells = <0>;
+		};
+	};
+
 	msm_cpufreq: qcom,msm-cpufreq {
 		compatible = "qcom,msm-cpufreq";
 		clock-names = "cpu0_clk", "cpu4_clk";
@@ -618,6 +694,16 @@
 			< 6881 /* 1804 MHz */ >;
 	};
 
+	snoc_cnoc_keepalive: qcom,snoc_cnoc_keepalive {
+		compatible = "qcom,devbw";
+		governor = "powersave";
+		qcom,src-dst-ports = <139 627>;
+		qcom,active-only;
+		status = "ok";
+		qcom,bw-tbl =
+			< 1 >;
+	};
+
 	devfreq_memlat_0: qcom,cpu0-memlat-mon {
 		compatible = "qcom,arm-memlat-mon";
 		qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
@@ -798,17 +884,17 @@
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
 			<   499200000 0x5014021a 0x00002020 0x1 3 >,
 			<   576000000 0x5014031e 0x00002020 0x1 4 >,
-			<   652800000 0x501c0422 0x00002020 0x1 5 >,
-			<   729600000 0x501c0526 0x00002020 0x1 6 >,
-			<   806400000 0x501c062a 0x00002222 0x1 7 >;
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >;
 
 		qcom,pwrcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
 			<   499200000 0x5014021a 0x00002020 0x1 3 >,
 			<   576000000 0x5014031e 0x00002020 0x1 4 >,
-			<   652800000 0x501c0422 0x00002020 0x1 5 >,
-			<   748800000 0x501c0527 0x00002020 0x1 6 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   748800000 0x401c0527 0x00002020 0x1 6 >,
 			<   825600000 0x401c062b 0x00002222 0x1 7 >,
 			<   902400000 0x4024072f 0x00002626 0x1 8 >,
 			<   979200000 0x40240833 0x00002929 0x1 9 >,
@@ -821,9 +907,9 @@
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
 			<   499200000 0x5014021a 0x00002020 0x1 3 >,
 			<   576000000 0x5014031e 0x00002020 0x1 4 >,
-			<   652800000 0x501c0422 0x00002020 0x1 5 >,
-			<   729600000 0x501c0526 0x00002020 0x1 6 >,
-			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
 			<   883200000 0x4024072b 0x00002525 0x1 8 >,
 			<   960000000 0x40240832 0x00002828 0x1 9 >,
 			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
@@ -1315,7 +1401,7 @@
 
 		qcom,pas-id = <18>;
 		qcom,proxy-timeout-ms = <10000>;
-		qcom,smem-id = <423>;
+		qcom,smem-id = <601>;
 		qcom,sysmon-id = <7>;
 		qcom,ssctl-instance-id = <0x17>;
 		qcom,firmware-name = "cdsp";
@@ -1985,6 +2071,17 @@
 			0x1ffc	/* apps_v6_rt_nhash_ofst; */
 			0x0	/* apps_v6_rt_nhash_size; */
 		>;
+
+		/* smp2p gpio information */
+		qcom,smp2pgpio_map_ipa_1_out {
+			compatible = "qcom,smp2pgpio-map-ipa-1-out";
+			gpios = <&smp2pgpio_ipa_1_out 0 0>;
+		};
+
+		qcom,smp2pgpio_map_ipa_1_in {
+			compatible = "qcom,smp2pgpio-map-ipa-1-in";
+			gpios = <&smp2pgpio_ipa_1_in 0 0>;
+		};
 	};
 
 	qcom,ipa_fws {
@@ -2040,6 +2137,29 @@
 		compatible = "qcom,apss-core-ea";
 		reg = <0x780000 0x1000>;
 	};
+
+	qcom,icnss@18800000 {
+		compatible = "qcom,icnss";
+		reg = <0x18800000 0x800000>,
+		      <0xa0000000 0x10000000>,
+		      <0xb0000000 0x10000>;
+		reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
+		iommus = <&apps_smmu 0x0040>,
+			 <&apps_smmu 0x0041>;
+		interrupts = <0 414 0 /* CE0 */ >,
+			     <0 415 0 /* CE1 */ >,
+			     <0 416 0 /* CE2 */ >,
+			     <0 417 0 /* CE3 */ >,
+			     <0 418 0 /* CE4 */ >,
+			     <0 419 0 /* CE5 */ >,
+			     <0 420 0 /* CE6 */ >,
+			     <0 421 0 /* CE7 */ >,
+			     <0 422 0 /* CE8 */ >,
+			     <0 423 0 /* CE9 */ >,
+			     <0 424 0 /* CE10 */ >,
+			     <0 425 0 /* CE11 */ >;
+		qcom,wlan-msa-memory = <0x100000>;
+	};
 };
 
 &pcie_0_gdsc {
@@ -2127,6 +2247,9 @@
 };
 
 &gpu_gx_gdsc {
+	clock-names = "core_root_clk";
+	clocks = <&clock_gfx GPU_CC_GX_GFX3D_CLK_SRC>;
+	qcom,force-enable-root-clk;
 	parent-supply = <&pm8005_s1_level>;
 	status = "ok";
 };
@@ -2158,3 +2281,4 @@
 #include "sdm845-pinctrl.dtsi"
 #include "sdm845-audio.dtsi"
 #include "sdm845-gpu.dtsi"
+#include "sdm845-usb.dtsi"
diff --git a/arch/arm64/configs/sdm845-perf_defconfig b/arch/arm64/configs/sdm845-perf_defconfig
index 4a13b7a..e39097c 100644
--- a/arch/arm64/configs/sdm845-perf_defconfig
+++ b/arch/arm64/configs/sdm845-perf_defconfig
@@ -1,5 +1,6 @@
 CONFIG_LOCALVERSION="-perf"
 # CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
 CONFIG_NO_HZ=y
@@ -246,6 +247,7 @@
 CONFIG_DUMMY=y
 CONFIG_TUN=y
 CONFIG_SKY2=y
+CONFIG_RNDIS_IPA=y
 CONFIG_SMSC911X=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
@@ -268,6 +270,7 @@
 # CONFIG_LEGACY_PTYS is not set
 # CONFIG_DEVMEM is not set
 # CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_MSM_GENI=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HW_RANDOM=y
 CONFIG_MSM_ADSPRPC=y
@@ -280,6 +283,7 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_SDM845=y
 CONFIG_PINCTRL_SDM830=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
@@ -395,6 +399,10 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_COINCELL=y
@@ -408,6 +416,7 @@
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
@@ -457,6 +466,7 @@
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON=y
+CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_RRADC=y
 CONFIG_PWM=y
@@ -464,7 +474,7 @@
 CONFIG_ARM_GIC_V3_ACL=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
+CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm64/configs/sdm845_defconfig b/arch/arm64/configs/sdm845_defconfig
index 7333731..bd24068 100644
--- a/arch/arm64/configs/sdm845_defconfig
+++ b/arch/arm64/configs/sdm845_defconfig
@@ -1,4 +1,5 @@
 # CONFIG_LOCALVERSION_AUTO is not set
+# CONFIG_FHANDLE is not set
 CONFIG_AUDIT=y
 # CONFIG_AUDITSYSCALL is not set
 CONFIG_NO_HZ=y
@@ -255,6 +256,7 @@
 CONFIG_BONDING=y
 CONFIG_DUMMY=y
 CONFIG_TUN=y
+CONFIG_RNDIS_IPA=y
 CONFIG_PPP=y
 CONFIG_PPP_BSDCOMP=y
 CONFIG_PPP_DEFLATE=y
@@ -275,6 +277,8 @@
 # CONFIG_SERIO_SERPORT is not set
 # CONFIG_VT is not set
 # CONFIG_LEGACY_PTYS is not set
+CONFIG_SERIAL_MSM_GENI=y
+CONFIG_SERIAL_MSM_GENI_CONSOLE=y
 CONFIG_DIAG_CHAR=y
 CONFIG_HVC_DCC=y
 CONFIG_HVC_DCC_SERIALIZE_SMP=y
@@ -289,6 +293,7 @@
 CONFIG_SPI_SPIDEV=y
 CONFIG_SLIMBUS_MSM_NGD=y
 CONFIG_SPMI=y
+CONFIG_SPMI_MSM_PMIC_ARB_DEBUG=y
 CONFIG_PINCTRL_SDM845=y
 CONFIG_PINCTRL_SDM830=y
 CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
@@ -412,6 +417,10 @@
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ION=y
 CONFIG_ION_MSM=y
+CONFIG_GSI=y
+CONFIG_IPA3=y
+CONFIG_RMNET_IPA3=y
+CONFIG_IPA_UT=y
 CONFIG_SPS=y
 CONFIG_SPS_SUPPORT_NDP_BAM=y
 CONFIG_QPNP_COINCELL=y
@@ -425,6 +434,7 @@
 CONFIG_MSM_CLK_RPMH=y
 CONFIG_CLOCK_CPU_OSM=y
 CONFIG_MSM_GPUCC_SDM845=y
+CONFIG_QCOM_MDSS_PLL=y
 CONFIG_REMOTE_SPINLOCK_MSM=y
 CONFIG_MSM_QMP=y
 CONFIG_IOMMU_IO_PGTABLE_FAST=y
@@ -479,6 +489,7 @@
 CONFIG_DEVFREQ_SIMPLE_DEV=y
 CONFIG_QCOM_DEVFREQ_DEVBW=y
 CONFIG_EXTCON=y
+CONFIG_EXTCON_USB_GPIO=y
 CONFIG_IIO=y
 CONFIG_QCOM_RRADC=y
 CONFIG_PWM=y
@@ -487,7 +498,7 @@
 CONFIG_PHY_XGENE=y
 CONFIG_ANDROID=y
 CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder"
+CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT3_FS=y
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 860c3b6..40e775a 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -1743,7 +1743,11 @@
 {
 	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
 	dma_addr_t dma_addr;
-	int ret, prot, len = PAGE_ALIGN(size + offset);
+	int ret, prot, len, start_offset, map_offset;
+
+	map_offset = offset & ~PAGE_MASK;
+	start_offset = offset & PAGE_MASK;
+	len = PAGE_ALIGN(map_offset + size);
 
 	dma_addr = __alloc_iova(mapping, len);
 	if (dma_addr == DMA_ERROR_CODE)
@@ -1753,12 +1757,12 @@
 	prot = __get_iommu_pgprot(attrs, prot,
 				  is_dma_coherent(dev, attrs));
 
-	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len,
-			prot);
+	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page) +
+			start_offset, len, prot);
 	if (ret < 0)
 		goto fail;
 
-	return dma_addr + offset;
+	return dma_addr + map_offset;
 fail:
 	__free_iova(mapping, dma_addr, len);
 	return DMA_ERROR_CODE;
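NOTE: the hunk above fixes the case where "offset" spans one or more whole
pages: full pages are skipped via start_offset (so the IOMMU mapping starts
at the page that actually holds the data) and only the sub-page remainder,
map_offset, is reflected in the mapping length and the returned handle. A
self-contained illustration of the arithmetic, assuming 4 KiB pages:

	#include <assert.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(~(PAGE_SIZE - 1))
	#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

	int main(void)
	{
		/* a 100-byte buffer starting 5000 bytes into the page array */
		unsigned long offset = 5000, size = 100;
		unsigned long map_offset = offset & ~PAGE_MASK;	/* 904 */
		unsigned long start_offset = offset & PAGE_MASK;

		/* one whole page is skipped before the mapping begins */
		assert(start_offset == PAGE_SIZE);

		/* new code: a single page covers the payload */
		assert(PAGE_ALIGN(map_offset + size) == PAGE_SIZE);

		/* old code mapped PAGE_ALIGN(size + offset) from the first
		 * page: two pages of IOVA for 100 bytes of payload */
		assert(PAGE_ALIGN(size + offset) == 2 * PAGE_SIZE);
		return 0;
	}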
@@ -1897,7 +1901,11 @@
 	if (!mapping)
 		goto err;
 
-	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+							__GFP_NORETRY);
+	if (!mapping->bitmap)
+		mapping->bitmap = vzalloc(bitmap_size);
+
 	if (!mapping->bitmap)
 		goto err2;
 
@@ -1912,7 +1920,7 @@
 	kref_init(&mapping->kref);
 	return mapping;
 err3:
-	kfree(mapping->bitmap);
+	kvfree(mapping->bitmap);
 err2:
 	kfree(mapping);
 err:
@@ -1926,7 +1934,7 @@
 		container_of(kref, struct dma_iommu_mapping, kref);
 
 	iommu_domain_free(mapping->domain);
-	kfree(mapping->bitmap);
+	kvfree(mapping->bitmap);
 	kfree(mapping);
 }
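NOTE: the bitmap hunks above switch to a try-kmalloc-then-vmalloc pattern:
large IOVA bitmaps no longer demand physically contiguous memory, and
kvfree() in both teardown paths frees either kind. A sketch of the pattern
in isolation (newer kernels provide kvzalloc() for exactly this):

	#include <linux/mm.h>
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *bitmap_zalloc_fallback(size_t bitmap_size)
	{
		/* try contiguous pages quietly, without OOM-killer pressure */
		void *p = kzalloc(bitmap_size,
				  GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);

		if (!p)
			p = vzalloc(bitmap_size);	/* virtually contiguous */
		return p;	/* release with kvfree(p), never kfree() */
	}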
 
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 8ac0e59..0ddf369 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -269,6 +269,11 @@
 DEFINE_HWx_IRQDISPATCH(5)
 #endif
 
+static void ltq_hw_irq_handler(struct irq_desc *desc)
+{
+	ltq_hw_irqdispatch(irq_desc_get_irq(desc) - 2);
+}
+
 #ifdef CONFIG_MIPS_MT_SMP
 void __init arch_init_ipiirq(int irq, struct irqaction *action)
 {
@@ -313,23 +318,19 @@
 asmlinkage void plat_irq_dispatch(void)
 {
 	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
-	unsigned int i;
+	int irq;
 
-	if ((MIPS_CPU_TIMER_IRQ == 7) && (pending & CAUSEF_IP7)) {
-		do_IRQ(MIPS_CPU_TIMER_IRQ);
-		goto out;
-	} else {
-		for (i = 0; i < MAX_IM; i++) {
-			if (pending & (CAUSEF_IP2 << i)) {
-				ltq_hw_irqdispatch(i);
-				goto out;
-			}
-		}
+	if (!pending) {
+		spurious_interrupt();
+		return;
 	}
-	pr_alert("Spurious IRQ: CAUSE=0x%08x\n", read_c0_status());
 
-out:
-	return;
+	pending >>= CAUSEB_IP;
+	while (pending) {
+		irq = fls(pending) - 1;
+		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
+		pending &= ~BIT(irq);
+	}
 }
 
 static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
@@ -354,11 +355,6 @@
 	.map = icu_map,
 };
 
-static struct irqaction cascade = {
-	.handler = no_action,
-	.name = "cascade",
-};
-
 int __init icu_of_init(struct device_node *node, struct device_node *parent)
 {
 	struct device_node *eiu_node;
@@ -390,7 +386,7 @@
 	mips_cpu_irq_init();
 
 	for (i = 0; i < MAX_IM; i++)
-		setup_irq(i + 2, &cascade);
+		irq_set_chained_handler(i + 2, ltq_hw_irq_handler);
 
 	if (cpu_has_vint) {
 		pr_info("Setting up vectored interrupts\n");
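NOTE: the rewritten plat_irq_dispatch() above no longer special-cases the
timer or bails out after the first match: it shifts the pending field down
to bit 0 and services every pending line, highest bit first, so IP7 (the
timer) naturally keeps priority. A userspace-runnable sketch of the loop:

	#include <stdio.h>

	#define CAUSEB_IP	8	/* IP0 sits at bit 8 of CP0 Cause */
	#define BIT(n)		(1u << (n))

	/* portable stand-in for the kernel's fls(): highest set bit + 1 */
	static int fls_sw(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		/* pretend IP2 and the IP7 timer are both pending */
		unsigned int pending = (BIT(2) | BIT(7)) << CAUSEB_IP;

		pending >>= CAUSEB_IP;
		while (pending) {
			int irq = fls_sw(pending) - 1;

			printf("dispatch CPU IRQ %d\n", irq); /* 7, then 2 */
			pending &= ~BIT(irq);
		}
		return 0;
	}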
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 9a2aee1..7fcf512 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -68,6 +68,15 @@
 	".previous\n"
 
 /*
+ * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry
+ * (with lowest bit set) for which the fault handler in fixup_exception() will
+ * load -EFAULT into %r8 for a read or write fault, and will zero the target
+ * register in case of a read fault in get_user().
+ */
+#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\
+	ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1)
+
+/*
  * The page fault handler stores, in a per-cpu area, the following information
  * if a fixup routine is available.
  */
@@ -94,7 +103,7 @@
 #define __get_user(x, ptr)                               \
 ({                                                       \
 	register long __gu_err __asm__ ("r8") = 0;       \
-	register long __gu_val __asm__ ("r9") = 0;       \
+	register long __gu_val;				 \
 							 \
 	load_sr2();					 \
 	switch (sizeof(*(ptr))) {			 \
@@ -110,22 +119,23 @@
 })
 
 #define __get_user_asm(ldx, ptr)                        \
-	__asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t"	\
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\
+	__asm__("1: " ldx " 0(%%sr2,%2),%0\n"		\
+		"9:\n"					\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)        \
-		: "r"(ptr), "1"(__gu_err)		\
-		: "r1");
+		: "r"(ptr), "1"(__gu_err));
 
 #if !defined(CONFIG_64BIT)
 
 #define __get_user_asm64(ptr) 				\
-	__asm__("\n1:\tldw 0(%%sr2,%2),%0"		\
-		"\n2:\tldw 4(%%sr2,%2),%R0\n\t"		\
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\
-		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\
+	__asm__("   copy %%r0,%R0\n"			\
+		"1: ldw 0(%%sr2,%2),%0\n"		\
+		"2: ldw 4(%%sr2,%2),%R0\n"		\
+		"9:\n"					\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	\
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	\
 		: "=r"(__gu_val), "=r"(__gu_err)	\
-		: "r"(ptr), "1"(__gu_err)		\
-		: "r1");
+		: "r"(ptr), "1"(__gu_err));
 
 #endif /* !defined(CONFIG_64BIT) */
 
@@ -151,32 +161,31 @@
  * The "__put_user/kernel_asm()" macros tell gcc they read from memory
  * instead of writing. This is because they do not write to any memory
  * gcc knows about, so there are no aliasing issues. These macros must
- * also be aware that "fixup_put_user_skip_[12]" are executed in the
- * context of the fault, and any registers used there must be listed
- * as clobbers. In this case only "r1" is used by the current routines.
- * r8/r9 are already listed as err/val.
+ * also be aware that fixups are executed in the context of the fault,
+ * and any registers used there must be listed as clobbers.
+ * r8 is already listed as err.
  */
 
 #define __put_user_asm(stx, x, ptr)                         \
 	__asm__ __volatile__ (                              \
-		"\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t"	    \
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\
+		"1: " stx " %2,0(%%sr2,%1)\n"		    \
+		"9:\n"					    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
 		: "=r"(__pu_err)                            \
-		: "r"(ptr), "r"(x), "0"(__pu_err)	    \
-		: "r1")
+		: "r"(ptr), "r"(x), "0"(__pu_err))
 
 
 #if !defined(CONFIG_64BIT)
 
 #define __put_user_asm64(__val, ptr) do {	    	    \
 	__asm__ __volatile__ (				    \
-		"\n1:\tstw %2,0(%%sr2,%1)"		    \
-		"\n2:\tstw %R2,4(%%sr2,%1)\n\t"		    \
-		ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\
-		ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\
+		"1: stw %2,0(%%sr2,%1)\n"		    \
+		"2: stw %R2,4(%%sr2,%1)\n"		    \
+		"9:\n"					    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b)	    \
+		ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b)	    \
 		: "=r"(__pu_err)                            \
-		: "r"(ptr), "r"(__val), "0"(__pu_err) \
-		: "r1");				    \
+		: "r"(ptr), "r"(__val), "0"(__pu_err));	    \
 } while (0)
 
 #endif /* !defined(CONFIG_64BIT) */
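NOTE: the "+ 1" in ASM_EXCEPTIONTABLE_ENTRY_EFAULT() hides a flag in bit 0
of the fixup address, which is safe because instruction addresses are at
least word-aligned. The consuming side lives in fixup_exception(), which
this diff does not show; a hedged sketch (structure and names illustrative)
of how such a handler can decode the entry (per the comment above, the real
handler additionally zeroes the target register on a get_user() read fault):

	struct exception_table_entry { unsigned long insn, fixup; };

	#define EFAULT	14

	/* returns the address to resume at; flags -EFAULT via *r8 */
	static unsigned long resolve_fixup(const struct exception_table_entry *fix,
					   long *r8)
	{
		unsigned long target = fix->fixup;

		if (target & 1) {	/* entry made by ..._EFAULT() above */
			*r8 = -EFAULT;	/* what __gu_err/__pu_err will see */
			target &= ~1UL;	/* recover the real "9:" address */
		}
		return target;
	}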
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index 3cad8aa..4e6f0d9 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -47,16 +47,6 @@
 EXPORT_SYMBOL(lclear_user);
 EXPORT_SYMBOL(lstrnlen_user);
 
-/* Global fixups - defined as int to avoid creation of function pointers */
-extern int fixup_get_user_skip_1;
-extern int fixup_get_user_skip_2;
-extern int fixup_put_user_skip_1;
-extern int fixup_put_user_skip_2;
-EXPORT_SYMBOL(fixup_get_user_skip_1);
-EXPORT_SYMBOL(fixup_get_user_skip_2);
-EXPORT_SYMBOL(fixup_put_user_skip_1);
-EXPORT_SYMBOL(fixup_put_user_skip_2);
-
 #ifndef CONFIG_64BIT
 /* Needed so insmod can set dp value */
 extern int $global$;
diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c
index e81afc37..e7ffde2 100644
--- a/arch/parisc/kernel/process.c
+++ b/arch/parisc/kernel/process.c
@@ -140,6 +140,8 @@
 	printk(KERN_EMERG "System shut down completed.\n"
 	       "Please power this system off now.");
 
+	/* prevent soft lockup/stalled CPU messages for endless loop. */
+	rcu_sysrq_start();
 	for (;;);
 }
 
diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile
index 8fa92b8..f2dac4d 100644
--- a/arch/parisc/lib/Makefile
+++ b/arch/parisc/lib/Makefile
@@ -2,7 +2,7 @@
 # Makefile for parisc-specific library files
 #
 
-lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \
+lib-y	:= lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \
 	   ucmpdi2.o delay.o
 
 obj-y	:= iomap.o
diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S
deleted file mode 100644
index a5b72f2..0000000
--- a/arch/parisc/lib/fixup.S
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Linux/PA-RISC Project (http://www.parisc-linux.org/)
- *
- *  Copyright (C) 2004  Randolph Chung <tausq@debian.org>
- *
- *    This program is free software; you can redistribute it and/or modify
- *    it under the terms of the GNU General Public License as published by
- *    the Free Software Foundation; either version 2, or (at your option)
- *    any later version.
- *
- *    This program is distributed in the hope that it will be useful,
- *    but WITHOUT ANY WARRANTY; without even the implied warranty of
- *    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *    GNU General Public License for more details.
- *
- *    You should have received a copy of the GNU General Public License
- *    along with this program; if not, write to the Free Software
- *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- * 
- * Fixup routines for kernel exception handling.
- */
-#include <asm/asm-offsets.h>
-#include <asm/assembly.h>
-#include <asm/errno.h>
-#include <linux/linkage.h>
-
-#ifdef CONFIG_SMP
-	.macro  get_fault_ip t1 t2
-	loadgp
-	addil LT%__per_cpu_offset,%r27
-	LDREG RT%__per_cpu_offset(%r1),\t1
-	/* t2 = smp_processor_id() */
-	mfctl 30,\t2
-	ldw TI_CPU(\t2),\t2
-#ifdef CONFIG_64BIT
-	extrd,u \t2,63,32,\t2
-#endif
-	/* t2 = &__per_cpu_offset[smp_processor_id()]; */
-	LDREGX \t2(\t1),\t2 
-	addil LT%exception_data,%r27
-	LDREG RT%exception_data(%r1),\t1
-	/* t1 = this_cpu_ptr(&exception_data) */
-	add,l \t1,\t2,\t1
-	/* %r27 = t1->fault_gp - restore gp */
-	LDREG EXCDATA_GP(\t1), %r27
-	/* t1 = t1->fault_ip */
-	LDREG EXCDATA_IP(\t1), \t1
-	.endm
-#else
-	.macro  get_fault_ip t1 t2
-	loadgp
-	/* t1 = this_cpu_ptr(&exception_data) */
-	addil LT%exception_data,%r27
-	LDREG RT%exception_data(%r1),\t2
-	/* %r27 = t2->fault_gp - restore gp */
-	LDREG EXCDATA_GP(\t2), %r27
-	/* t1 = t2->fault_ip */
-	LDREG EXCDATA_IP(\t2), \t1
-	.endm
-#endif
-
-	.level LEVEL
-
-	.text
-	.section .fixup, "ax"
-
-	/* get_user() fixups, store -EFAULT in r8, and 0 in r9 */
-ENTRY_CFI(fixup_get_user_skip_1)
-	get_fault_ip %r1,%r8
-	ldo 4(%r1), %r1
-	ldi -EFAULT, %r8
-	bv %r0(%r1)
-	copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_1)
-
-ENTRY_CFI(fixup_get_user_skip_2)
-	get_fault_ip %r1,%r8
-	ldo 8(%r1), %r1
-	ldi -EFAULT, %r8
-	bv %r0(%r1)
-	copy %r0, %r9
-ENDPROC_CFI(fixup_get_user_skip_2)
-
-	/* put_user() fixups, store -EFAULT in r8 */
-ENTRY_CFI(fixup_put_user_skip_1)
-	get_fault_ip %r1,%r8
-	ldo 4(%r1), %r1
-	bv %r0(%r1)
-	ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_1)
-
-ENTRY_CFI(fixup_put_user_skip_2)
-	get_fault_ip %r1,%r8
-	ldo 8(%r1), %r1
-	bv %r0(%r1)
-	ldi -EFAULT, %r8
-ENDPROC_CFI(fixup_put_user_skip_2)
-
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 56845de..f01188c 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -5,6 +5,8 @@
  *    Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
  *    Copyright (C) 2001 Matthieu Delahaye <delahaym at esiee.fr>
  *    Copyright (C) 2003 Randolph Chung <tausq with parisc-linux.org>
+ *    Copyright (C) 2017 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2017 John David Anglin <dave.anglin@bell.net>
  *
  *
  *    This program is free software; you can redistribute it and/or modify
@@ -132,4 +134,320 @@
 
 	.procend
 
+
+
+/*
+ * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
+ *
+ * Inputs:
+ * - sr1 already contains space of source region
+ * - sr2 already contains space of destination region
+ *
+ * Returns:
+ * - number of bytes that could not be copied.
+ *   On success, this will be zero.
+ *
+ * This code is based on a C-implementation of a copy routine written by
+ * Randolph Chung, which in turn was derived from glibc.
+ *
+ * Several strategies are tried to get the best performance for various
+ * conditions. In the optimal case, we copy in loops that move 32 or 16 bytes
+ * at a time using general registers.  Unaligned copies are handled either by
+ * aligning the destination and then using a shift-and-write method, or in a
+ * few cases by falling back to a byte-at-a-time copy.
+ *
+ * Testing with various alignments and buffer sizes shows that this code is
+ * often >10x faster than a simple byte-at-a-time copy, even for strangely
+ * aligned operands. It is interesting to note that the glibc version of memcpy
+ * (written in C) is actually quite fast already. This routine is able to beat
+ * it by 30-40% for aligned copies because of the loop unrolling, but in some
+ * cases the glibc version is still slightly faster. This lends credibility
+ * to the claim that gcc can generate very good code as long as we are careful.
+ *
+ * Possible optimizations:
+ * - add cache prefetching
+ * - try not to use the post-increment address modifiers; they may create
+ *   additional interlocks. The assumption is that these were only efficient
+ *   on old machines (pre PA8000 processors)
+ */
+
+	dst = arg0
+	src = arg1
+	len = arg2
+	end = arg3
+	t1  = r19
+	t2  = r20
+	t3  = r21
+	t4  = r22
+	srcspc = sr1
+	dstspc = sr2
+
+	t0 = r1
+	a1 = t1
+	a2 = t2
+	a3 = t3
+	a0 = t4
+
+	save_src = ret0
+	save_dst = ret1
+	save_len = r31
+
+ENTRY_CFI(pa_memcpy)
+	.proc
+	.callinfo NO_CALLS
+	.entry
+
+	/* Last destination address */
+	add	dst,len,end
+
+	/* short copy with less than 16 bytes? */
+	cmpib,>>=,n 15,len,.Lbyte_loop
+
+	/* same alignment? */
+	xor	src,dst,t0
+	extru	t0,31,2,t1
+	cmpib,<>,n  0,t1,.Lunaligned_copy
+
+#ifdef CONFIG_64BIT
+	/* only do 64-bit copies if we can get aligned. */
+	extru	t0,31,3,t1
+	cmpib,<>,n  0,t1,.Lalign_loop32
+
+	/* loop until we are 64-bit aligned */
+.Lalign_loop64:
+	extru	dst,31,3,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_16
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop64
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+	ldi	31,t0
+.Lcopy_loop_16:
+	cmpb,COND(>>=),n t0,len,.Lword_loop
+
+10:	ldd	0(srcspc,src),t1
+11:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+12:	std,ma	t1,8(dstspc,dst)
+13:	std,ma	t2,8(dstspc,dst)
+14:	ldd	0(srcspc,src),t1
+15:	ldd	8(srcspc,src),t2
+	ldo	16(src),src
+16:	std,ma	t1,8(dstspc,dst)
+17:	std,ma	t2,8(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_16
+	ldo	-32(len),len
+
+.Lword_loop:
+	cmpib,COND(>>=),n 3,len,.Lbyte_loop
+20:	ldw,ma	4(srcspc,src),t1
+21:	stw,ma	t1,4(dstspc,dst)
+	b	.Lword_loop
+	ldo	-4(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+#endif /* CONFIG_64BIT */
+
+	/* loop until we are 32-bit aligned */
+.Lalign_loop32:
+	extru	dst,31,2,t1
+	cmpib,=,n	0,t1,.Lcopy_loop_4
+20:	ldb,ma	1(srcspc,src),t1
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lalign_loop32
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+
+.Lcopy_loop_4:
+	cmpib,COND(>>=),n 15,len,.Lbyte_loop
+
+10:	ldw	0(srcspc,src),t1
+11:	ldw	4(srcspc,src),t2
+12:	stw,ma	t1,4(dstspc,dst)
+13:	stw,ma	t2,4(dstspc,dst)
+14:	ldw	8(srcspc,src),t1
+15:	ldw	12(srcspc,src),t2
+	ldo	16(src),src
+16:	stw,ma	t1,4(dstspc,dst)
+17:	stw,ma	t2,4(dstspc,dst)
+
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault)
+	ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done)
+
+	b	.Lcopy_loop_4
+	ldo	-16(len),len
+
+.Lbyte_loop:
+	cmpclr,COND(<>) len,%r0,%r0
+	b,n	.Lcopy_done
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lbyte_loop
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_done:
+	bv	%r0(%r2)
+	sub	end,dst,ret0
+
+
+	/* src and dst are not aligned the same way. */
+	/* need to go the hard way */
+.Lunaligned_copy:
+	/* align until dst is 32bit-word-aligned */
+	extru	dst,31,2,t1
+	cmpib,COND(=),n	0,t1,.Lcopy_dstaligned
+20:	ldb	0(srcspc,src),t1
+	ldo	1(src),src
+21:	stb,ma	t1,1(dstspc,dst)
+	b	.Lunaligned_copy
+	ldo	-1(len),len
+
+	ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done)
+	ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done)
+
+.Lcopy_dstaligned:
+
+	/* store src, dst and len in safe place */
+	copy	src,save_src
+	copy	dst,save_dst
+	copy	len,save_len
+
+	/* len now needs to give the number of words to copy */
+	SHRREG	len,2,len
+
+	/*
+	 * Copy from a not-aligned src to an aligned dst using shifts.
+	 * Handles 4 words per loop.
+	 */
+
+	depw,z src,28,2,t0
+	subi 32,t0,t0
+	mtsar t0
+	extru len,31,2,t0
+	cmpib,= 2,t0,.Lcase2
+	/* Make src aligned by rounding it down.  */
+	depi 0,31,2,src
+
+	cmpiclr,<> 3,t0,%r0
+	b,n .Lcase3
+	cmpiclr,<> 1,t0,%r0
+	b,n .Lcase1
+.Lcase0:
+	cmpb,= %r0,len,.Lcda_finish
+	nop
+
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b,n .Ldo3
+.Lcase1:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	ldo -1(len),len
+	cmpb,=,n %r0,len,.Ldo0
+.Ldo4:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo3:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a3, a0, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo2:
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a0, a1, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+.Ldo1:
+1:	ldw,ma 4(srcspc,src), a3
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	shrpw a1, a2, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+	ldo -4(len),len
+	cmpb,<> %r0,len,.Ldo4
+	nop
+.Ldo0:
+	shrpw a2, a3, %sar, t0
+1:	stw,ma t0, 4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done)
+
+.Lcda_rdfault:
+.Lcda_finish:
+	/* calculate new src, dst and len and jump to byte-copy loop */
+	sub	dst,save_dst,t0
+	add	save_src,t0,src
+	b	.Lbyte_loop
+	sub	save_len,t0,len
+
+.Lcase3:
+1:	ldw,ma 4(srcspc,src), a0
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo2
+	ldo 1(len),len
+.Lcase2:
+1:	ldw,ma 4(srcspc,src), a1
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+1:	ldw,ma 4(srcspc,src), a2
+	ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault)
+	b .Ldo1
+	ldo 2(len),len
+
+
+	/* fault exception fixup handlers: */
+#ifdef CONFIG_64BIT
+.Lcopy16_fault:
+10:	b	.Lcopy_done
+	std,ma	t1,8(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+#endif
+
+.Lcopy8_fault:
+10:	b	.Lcopy_done
+	stw,ma	t1,4(dstspc,dst)
+	ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done)
+
+	.exit
+ENDPROC_CFI(pa_memcpy)
+	.procend
+
 	.end
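
A minimal C sketch (names hypothetical, not part of this patch) of the
fault-handling contract the assembly above implements: every load and store
carries an exception-table entry, and when one faults, control transfers to
.Lcopy_done, which reports the number of bytes that were not copied.

	/* sketch_fault() stands in for a real page-fault check */
	static int sketch_fault(const void *p)
	{
		return p == NULL;
	}

	static unsigned long sketch_pa_memcpy(char *dst, const char *src,
					      unsigned long len)
	{
		char *end = dst + len;		/* mirrors "add dst,len,end" */

		while (dst < end) {
			if (sketch_fault(src) || sketch_fault(dst))
				break;		/* fixup path: .Lcopy_done */
			*dst++ = *src++;
		}
		return end - dst;		/* mirrors "sub end,dst,ret0" */
	}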
diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c
index f82ff10..b3d47ec 100644
--- a/arch/parisc/lib/memcpy.c
+++ b/arch/parisc/lib/memcpy.c
@@ -2,7 +2,7 @@
  *    Optimized memory copy routines.
  *
  *    Copyright (C) 2004 Randolph Chung <tausq@debian.org>
- *    Copyright (C) 2013 Helge Deller <deller@gmx.de>
+ *    Copyright (C) 2013-2017 Helge Deller <deller@gmx.de>
  *
  *    This program is free software; you can redistribute it and/or modify
  *    it under the terms of the GNU General Public License as published by
@@ -21,474 +21,21 @@
  *    Portions derived from the GNU C Library
  *    Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc.
  *
- * Several strategies are tried to try to get the best performance for various
- * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using 
- * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using
- * general registers.  Unaligned copies are handled either by aligning the 
- * destination and then using shift-and-write method, or in a few cases by 
- * falling back to a byte-at-a-time copy.
- *
- * I chose to implement this in C because it is easier to maintain and debug,
- * and in my experiments it appears that the C code generated by gcc (3.3/3.4
- * at the time of writing) is fairly optimal. Unfortunately some of the 
- * semantics of the copy routine (exception handling) is difficult to express
- * in C, so we have to play some tricks to get it to work.
- *
- * All the loads and stores are done via explicit asm() code in order to use
- * the right space registers. 
- * 
- * Testing with various alignments and buffer sizes shows that this code is 
- * often >10x faster than a simple byte-at-a-time copy, even for strangely
- * aligned operands. It is interesting to note that the glibc version
- * of memcpy (written in C) is actually quite fast already. This routine is 
- * able to beat it by 30-40% for aligned copies because of the loop unrolling, 
- * but in some cases the glibc version is still slightly faster. This lends 
- * more credibility that gcc can generate very good code as long as we are 
- * careful.
- *
- * TODO:
- * - cache prefetching needs more experimentation to get optimal settings
- * - try not to use the post-increment address modifiers; they create additional
- *   interlocks
- * - replace byte-copy loops with stybs sequences
  */
 
-#ifdef __KERNEL__
 #include <linux/module.h>
 #include <linux/compiler.h>
 #include <linux/uaccess.h>
-#define s_space "%%sr1"
-#define d_space "%%sr2"
-#else
-#include "memcpy.h"
-#define s_space "%%sr0"
-#define d_space "%%sr0"
-#define pa_memcpy new2_copy
-#endif
 
 DECLARE_PER_CPU(struct exception_data, exception_data);
 
-#define preserve_branch(label)	do {					\
-	volatile int dummy = 0;						\
-	/* The following branch is never taken, it's just here to  */	\
-	/* prevent gcc from optimizing away our exception code. */ 	\
-	if (unlikely(dummy != dummy))					\
-		goto label;						\
-} while (0)
-
 #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3))
 #define get_kernel_space() (0)
 
-#define MERGE(w0, sh_1, w1, sh_2)  ({					\
-	unsigned int _r;						\
-	asm volatile (							\
-	"mtsar %3\n"							\
-	"shrpw %1, %2, %%sar, %0\n"					\
-	: "=r"(_r)							\
-	: "r"(w0), "r"(w1), "r"(sh_2)					\
-	);								\
-	_r;								\
-})
-#define THRESHOLD	16
-
-#ifdef DEBUG_MEMCPY
-#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0)
-#else
-#define DPRINTF(fmt, args...)
-#endif
-
-#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e)	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t"	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	: _tt(_t), "+r"(_a)				\
-	: 						\
-	: "r8")
-
-#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) 	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t"	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	: "+r"(_a) 					\
-	: _tt(_t)					\
-	: "r8")
-
-#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e)
-#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e)
-#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e)
-#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e)
-#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e)
-#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e)
-
-#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) 	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t"	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	: _tt(_t) 					\
-	: "r"(_a)					\
-	: "r8")
-
-#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) 	\
-	__asm__ __volatile__ (				\
-	"1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" 	\
-	ASM_EXCEPTIONTABLE_ENTRY(1b,_e)			\
-	: 						\
-	: _tt(_t), "r"(_a)				\
-	: "r8")
-
-#define ldw(_s,_o,_a,_t,_e)	def_load_insn(ldw,"=r",_s,_o,_a,_t,_e)
-#define stw(_s,_t,_o,_a,_e) 	def_store_insn(stw,"r",_s,_t,_o,_a,_e)
-
-#ifdef  CONFIG_PREFETCH
-static inline void prefetch_src(const void *addr)
-{
-	__asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr));
-}
-
-static inline void prefetch_dst(const void *addr)
-{
-	__asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr));
-}
-#else
-#define prefetch_src(addr) do { } while(0)
-#define prefetch_dst(addr) do { } while(0)
-#endif
-
-#define PA_MEMCPY_OK		0
-#define PA_MEMCPY_LOAD_ERROR	1
-#define PA_MEMCPY_STORE_ERROR	2
-
-/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words
- * per loop.  This code is derived from glibc. 
- */
-static noinline unsigned long copy_dstaligned(unsigned long dst,
-					unsigned long src, unsigned long len)
-{
-	/* gcc complains that a2 and a3 may be uninitialized, but actually
-	 * they cannot be.  Initialize a2/a3 to shut gcc up.
-	 */
-	register unsigned int a0, a1, a2 = 0, a3 = 0;
-	int sh_1, sh_2;
-
-	/* prefetch_src((const void *)src); */
-
-	/* Calculate how to shift a word read at the memory operation
-	   aligned srcp to make it aligned for copy.  */
-	sh_1 = 8 * (src % sizeof(unsigned int));
-	sh_2 = 8 * sizeof(unsigned int) - sh_1;
-
-	/* Make src aligned by rounding it down.  */
-	src &= -sizeof(unsigned int);
-
-	switch (len % 4)
-	{
-		case 2:
-			/* a1 = ((unsigned int *) src)[0];
-			   a2 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a1, cda_ldw_exc);
-			ldw(s_space, 4, src, a2, cda_ldw_exc);
-			src -= 1 * sizeof(unsigned int);
-			dst -= 3 * sizeof(unsigned int);
-			len += 2;
-			goto do1;
-		case 3:
-			/* a0 = ((unsigned int *) src)[0];
-			   a1 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a0, cda_ldw_exc);
-			ldw(s_space, 4, src, a1, cda_ldw_exc);
-			src -= 0 * sizeof(unsigned int);
-			dst -= 2 * sizeof(unsigned int);
-			len += 1;
-			goto do2;
-		case 0:
-			if (len == 0)
-				return PA_MEMCPY_OK;
-			/* a3 = ((unsigned int *) src)[0];
-			   a0 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a3, cda_ldw_exc);
-			ldw(s_space, 4, src, a0, cda_ldw_exc);
-			src -=-1 * sizeof(unsigned int);
-			dst -= 1 * sizeof(unsigned int);
-			len += 0;
-			goto do3;
-		case 1:
-			/* a2 = ((unsigned int *) src)[0];
-			   a3 = ((unsigned int *) src)[1]; */
-			ldw(s_space, 0, src, a2, cda_ldw_exc);
-			ldw(s_space, 4, src, a3, cda_ldw_exc);
-			src -=-2 * sizeof(unsigned int);
-			dst -= 0 * sizeof(unsigned int);
-			len -= 1;
-			if (len == 0)
-				goto do0;
-			goto do4;			/* No-op.  */
-	}
-
-	do
-	{
-		/* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */
-do4:
-		/* a0 = ((unsigned int *) src)[0]; */
-		ldw(s_space, 0, src, a0, cda_ldw_exc);
-		/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
-		stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-do3:
-		/* a1 = ((unsigned int *) src)[1]; */
-		ldw(s_space, 4, src, a1, cda_ldw_exc);
-		/* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */
-		stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc);
-do2:
-		/* a2 = ((unsigned int *) src)[2]; */
-		ldw(s_space, 8, src, a2, cda_ldw_exc);
-		/* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */
-		stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc);
-do1:
-		/* a3 = ((unsigned int *) src)[3]; */
-		ldw(s_space, 12, src, a3, cda_ldw_exc);
-		/* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */
-		stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc);
-
-		src += 4 * sizeof(unsigned int);
-		dst += 4 * sizeof(unsigned int);
-		len -= 4;
-	}
-	while (len != 0);
-
-do0:
-	/* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */
-	stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc);
-
-	preserve_branch(handle_load_error);
-	preserve_branch(handle_store_error);
-
-	return PA_MEMCPY_OK;
-
-handle_load_error:
-	__asm__ __volatile__ ("cda_ldw_exc:\n");
-	return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
-	__asm__ __volatile__ ("cda_stw_exc:\n");
-	return PA_MEMCPY_STORE_ERROR;
-}
-
-
-/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR.
- * In case of an access fault the faulty address can be read from the per_cpu
- * exception data struct. */
-static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp,
-					unsigned long len)
-{
-	register unsigned long src, dst, t1, t2, t3;
-	register unsigned char *pcs, *pcd;
-	register unsigned int *pws, *pwd;
-	register double *pds, *pdd;
-	unsigned long ret;
-
-	src = (unsigned long)srcp;
-	dst = (unsigned long)dstp;
-	pcs = (unsigned char *)srcp;
-	pcd = (unsigned char *)dstp;
-
-	/* prefetch_src((const void *)srcp); */
-
-	if (len < THRESHOLD)
-		goto byte_copy;
-
-	/* Check alignment */
-	t1 = (src ^ dst);
-	if (unlikely(t1 & (sizeof(double)-1)))
-		goto unaligned_copy;
-
-	/* src and dst have same alignment. */
-
-	/* Copy bytes till we are double-aligned. */
-	t2 = src & (sizeof(double) - 1);
-	if (unlikely(t2 != 0)) {
-		t2 = sizeof(double) - t2;
-		while (t2 && len) {
-			/* *pcd++ = *pcs++; */
-			ldbma(s_space, pcs, t3, pmc_load_exc);
-			len--;
-			stbma(d_space, t3, pcd, pmc_store_exc);
-			t2--;
-		}
-	}
-
-	pds = (double *)pcs;
-	pdd = (double *)pcd;
-
-#if 0
-	/* Copy 8 doubles at a time */
-	while (len >= 8*sizeof(double)) {
-		register double r1, r2, r3, r4, r5, r6, r7, r8;
-		/* prefetch_src((char *)pds + L1_CACHE_BYTES); */
-		flddma(s_space, pds, r1, pmc_load_exc);
-		flddma(s_space, pds, r2, pmc_load_exc);
-		flddma(s_space, pds, r3, pmc_load_exc);
-		flddma(s_space, pds, r4, pmc_load_exc);
-		fstdma(d_space, r1, pdd, pmc_store_exc);
-		fstdma(d_space, r2, pdd, pmc_store_exc);
-		fstdma(d_space, r3, pdd, pmc_store_exc);
-		fstdma(d_space, r4, pdd, pmc_store_exc);
-
-#if 0
-		if (L1_CACHE_BYTES <= 32)
-			prefetch_src((char *)pds + L1_CACHE_BYTES);
-#endif
-		flddma(s_space, pds, r5, pmc_load_exc);
-		flddma(s_space, pds, r6, pmc_load_exc);
-		flddma(s_space, pds, r7, pmc_load_exc);
-		flddma(s_space, pds, r8, pmc_load_exc);
-		fstdma(d_space, r5, pdd, pmc_store_exc);
-		fstdma(d_space, r6, pdd, pmc_store_exc);
-		fstdma(d_space, r7, pdd, pmc_store_exc);
-		fstdma(d_space, r8, pdd, pmc_store_exc);
-		len -= 8*sizeof(double);
-	}
-#endif
-
-	pws = (unsigned int *)pds;
-	pwd = (unsigned int *)pdd;
-
-word_copy:
-	while (len >= 8*sizeof(unsigned int)) {
-		register unsigned int r1,r2,r3,r4,r5,r6,r7,r8;
-		/* prefetch_src((char *)pws + L1_CACHE_BYTES); */
-		ldwma(s_space, pws, r1, pmc_load_exc);
-		ldwma(s_space, pws, r2, pmc_load_exc);
-		ldwma(s_space, pws, r3, pmc_load_exc);
-		ldwma(s_space, pws, r4, pmc_load_exc);
-		stwma(d_space, r1, pwd, pmc_store_exc);
-		stwma(d_space, r2, pwd, pmc_store_exc);
-		stwma(d_space, r3, pwd, pmc_store_exc);
-		stwma(d_space, r4, pwd, pmc_store_exc);
-
-		ldwma(s_space, pws, r5, pmc_load_exc);
-		ldwma(s_space, pws, r6, pmc_load_exc);
-		ldwma(s_space, pws, r7, pmc_load_exc);
-		ldwma(s_space, pws, r8, pmc_load_exc);
-		stwma(d_space, r5, pwd, pmc_store_exc);
-		stwma(d_space, r6, pwd, pmc_store_exc);
-		stwma(d_space, r7, pwd, pmc_store_exc);
-		stwma(d_space, r8, pwd, pmc_store_exc);
-		len -= 8*sizeof(unsigned int);
-	}
-
-	while (len >= 4*sizeof(unsigned int)) {
-		register unsigned int r1,r2,r3,r4;
-		ldwma(s_space, pws, r1, pmc_load_exc);
-		ldwma(s_space, pws, r2, pmc_load_exc);
-		ldwma(s_space, pws, r3, pmc_load_exc);
-		ldwma(s_space, pws, r4, pmc_load_exc);
-		stwma(d_space, r1, pwd, pmc_store_exc);
-		stwma(d_space, r2, pwd, pmc_store_exc);
-		stwma(d_space, r3, pwd, pmc_store_exc);
-		stwma(d_space, r4, pwd, pmc_store_exc);
-		len -= 4*sizeof(unsigned int);
-	}
-
-	pcs = (unsigned char *)pws;
-	pcd = (unsigned char *)pwd;
-
-byte_copy:
-	while (len) {
-		/* *pcd++ = *pcs++; */
-		ldbma(s_space, pcs, t3, pmc_load_exc);
-		stbma(d_space, t3, pcd, pmc_store_exc);
-		len--;
-	}
-
-	return PA_MEMCPY_OK;
-
-unaligned_copy:
-	/* possibly we are aligned on a word, but not on a double... */
-	if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) {
-		t2 = src & (sizeof(unsigned int) - 1);
-
-		if (unlikely(t2 != 0)) {
-			t2 = sizeof(unsigned int) - t2;
-			while (t2) {
-				/* *pcd++ = *pcs++; */
-				ldbma(s_space, pcs, t3, pmc_load_exc);
-				stbma(d_space, t3, pcd, pmc_store_exc);
-				len--;
-				t2--;
-			}
-		}
-
-		pws = (unsigned int *)pcs;
-		pwd = (unsigned int *)pcd;
-		goto word_copy;
-	}
-
-	/* Align the destination.  */
-	if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) {
-		t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1));
-		while (t2) {
-			/* *pcd++ = *pcs++; */
-			ldbma(s_space, pcs, t3, pmc_load_exc);
-			stbma(d_space, t3, pcd, pmc_store_exc);
-			len--;
-			t2--;
-		}
-		dst = (unsigned long)pcd;
-		src = (unsigned long)pcs;
-	}
-
-	ret = copy_dstaligned(dst, src, len / sizeof(unsigned int));
-	if (ret)
-		return ret;
-
-	pcs += (len & -sizeof(unsigned int));
-	pcd += (len & -sizeof(unsigned int));
-	len %= sizeof(unsigned int);
-
-	preserve_branch(handle_load_error);
-	preserve_branch(handle_store_error);
-
-	goto byte_copy;
-
-handle_load_error:
-	__asm__ __volatile__ ("pmc_load_exc:\n");
-	return PA_MEMCPY_LOAD_ERROR;
-
-handle_store_error:
-	__asm__ __volatile__ ("pmc_store_exc:\n");
-	return PA_MEMCPY_STORE_ERROR;
-}
-
-
 /* Returns 0 for success, otherwise, returns number of bytes not transferred. */
-static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len)
-{
-	unsigned long ret, fault_addr, reference;
-	struct exception_data *d;
+extern unsigned long pa_memcpy(void *dst, const void *src,
+				unsigned long len);
 
-	ret = pa_memcpy_internal(dstp, srcp, len);
-	if (likely(ret == PA_MEMCPY_OK))
-		return 0;
-
-	/* if a load or store fault occured we can get the faulty addr */
-	d = this_cpu_ptr(&exception_data);
-	fault_addr = d->fault_addr;
-
-	/* error in load or store? */
-	if (ret == PA_MEMCPY_LOAD_ERROR)
-		reference = (unsigned long) srcp;
-	else
-		reference = (unsigned long) dstp;
-
-	DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n",
-		ret, len, fault_addr, reference);
-
-	if (fault_addr >= reference)
-		return len - (fault_addr - reference);
-	else
-		return len;
-}
-
-#ifdef __KERNEL__
 unsigned long __copy_to_user(void __user *dst, const void *src,
 			     unsigned long len)
 {
@@ -537,5 +84,3 @@
 
 	return __probe_kernel_read(dst, src, size);
 }
-
-#endif
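
With pa_memcpy() now implemented in assembly, the C wrappers only need to set
up the space registers and forward the return value; a sketch, assuming the
sr1/sr2 convention declared in the assembly (srcspc = sr1, dstspc = sr2):

	static unsigned long sketch_copy_to_user(void __user *dst,
						 const void *src,
						 unsigned long len)
	{
		mtsp(get_kernel_space(), 1);	/* source space -> sr1 */
		mtsp(get_user_space(), 2);	/* destination space -> sr2 */
		return pa_memcpy((void __force *)dst, src, len);
	}

A non-zero return is the number of bytes not transferred, which is exactly
what the copy_*_user() API expects.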
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 1a0b4f6..040c48f 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -149,6 +149,23 @@
 		d->fault_space = regs->isr;
 		d->fault_addr = regs->ior;
 
+		/*
+		 * Fix up get_user() and put_user().
+		 * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant
+		 * bit in the relative address of the fixup routine to indicate
+		 * that %r8 should be loaded with -EFAULT to report a userspace
+		 * access error.
+		 */
+		if (fix->fixup & 1) {
+			regs->gr[8] = -EFAULT;
+
+			/* zero target register for get_user() */
+			if (parisc_acctyp(0, regs->iir) == VM_READ) {
+				int treg = regs->iir & 0x1f;
+				regs->gr[treg] = 0;
+			}
+		}
+
 		regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup;
 		regs->iaoq[0] &= ~3;
 		/*
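
A sketch (helper names hypothetical) of the encoding used above: since
instructions are at least 4-byte aligned, the low bits of the relative fixup
offset are free, and bit 0 is used to request the -EFAULT/%r8 treatment:

	struct sketch_exception_entry {
		int fixup;	/* relative offset; bit 0 = EFAULT flag */
	};

	static unsigned long
	sketch_fixup_address(const struct sketch_exception_entry *fix)
	{
		unsigned long addr = (unsigned long)&fix->fixup + fix->fixup;

		return addr & ~3UL;	/* strip flag bits before branching */
	}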
diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
index 779782f..9a53a06 100644
--- a/arch/x86/lib/memcpy_64.S
+++ b/arch/x86/lib/memcpy_64.S
@@ -290,7 +290,7 @@
 	_ASM_EXTABLE_FAULT(.L_copy_leading_bytes, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w0, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w1, .L_memcpy_mcsafe_fail)
-	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
+	_ASM_EXTABLE_FAULT(.L_cache_w2, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w3, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w4, .L_memcpy_mcsafe_fail)
 	_ASM_EXTABLE_FAULT(.L_cache_w5, .L_memcpy_mcsafe_fail)
diff --git a/arch/x86/mm/kaslr.c b/arch/x86/mm/kaslr.c
index 887e571..aed2064 100644
--- a/arch/x86/mm/kaslr.c
+++ b/arch/x86/mm/kaslr.c
@@ -48,7 +48,7 @@
 #if defined(CONFIG_X86_ESPFIX64)
 static const unsigned long vaddr_end = ESPFIX_BASE_ADDR;
 #elif defined(CONFIG_EFI)
-static const unsigned long vaddr_end = EFI_VA_START;
+static const unsigned long vaddr_end = EFI_VA_END;
 #else
 static const unsigned long vaddr_end = __START_KERNEL_map;
 #endif
@@ -105,7 +105,7 @@
 	 */
 	BUILD_BUG_ON(vaddr_start >= vaddr_end);
 	BUILD_BUG_ON(IS_ENABLED(CONFIG_X86_ESPFIX64) &&
-		     vaddr_end >= EFI_VA_START);
+		     vaddr_end >= EFI_VA_END);
 	BUILD_BUG_ON((IS_ENABLED(CONFIG_X86_ESPFIX64) ||
 		      IS_ENABLED(CONFIG_EFI)) &&
 		     vaddr_end >= __START_KERNEL_map);
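
The EFI region on x86-64 is mapped downward, so EFI_VA_START is the top of
the region and EFI_VA_END its bottom; a KASLR range ending at EFI_VA_START
would therefore overlap the EFI mapping. A small illustration, with values
assumed from the usual x86-64 defines:

	/* assumed values, for illustration only */
	#define SKETCH_EFI_VA_START	( -4UL * (1UL << 30))	/* higher */
	#define SKETCH_EFI_VA_END	(-68UL * (1UL << 30))	/* lower  */

	/* The randomized range must stop at the *lower* bound of the EFI
	 * region, hence vaddr_end = EFI_VA_END in the hunk above. */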
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index f8960fc..9f21b0c 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -713,10 +713,9 @@
 		size = PFN_PHYS(xen_start_info->nr_p2m_frames);
 	}
 
-	if (!xen_is_e820_reserved(start, size)) {
-		memblock_reserve(start, size);
+	memblock_reserve(start, size);
+	if (!xen_is_e820_reserved(start, size))
 		return;
-	}
 
 #ifdef CONFIG_X86_32
 	/*
@@ -727,6 +726,7 @@
 	BUG();
 #else
 	xen_relocate_p2m();
+	memblock_free(start, size);
 #endif
 }
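
The hunk above reorders the logic so the p2m area is protected for its whole
lifetime: it is reserved unconditionally before the e820 check, and released
only after xen_relocate_p2m() has moved the data out. A condensed sketch of
the pattern:

	static void sketch_handle_p2m(phys_addr_t start, phys_addr_t size)
	{
		memblock_reserve(start, size);		/* protect while live */
		if (!xen_is_e820_reserved(start, size))
			return;				/* keep reservation */

		xen_relocate_p2m();			/* copy the data away */
		memblock_free(start, size);		/* now safe to release */
	}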
 
diff --git a/block/bio.c b/block/bio.c
index db85c57..655c901 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -372,10 +372,14 @@
 	bio_list_init(&punt);
 	bio_list_init(&nopunt);
 
-	while ((bio = bio_list_pop(current->bio_list)))
+	while ((bio = bio_list_pop(&current->bio_list[0])))
 		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[0] = nopunt;
 
-	*current->bio_list = nopunt;
+	bio_list_init(&nopunt);
+	while ((bio = bio_list_pop(&current->bio_list[1])))
+		bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
+	current->bio_list[1] = nopunt;
 
 	spin_lock(&bs->rescue_lock);
 	bio_list_merge(&bs->rescue_list, &punt);
@@ -462,7 +466,9 @@
 		 * we retry with the original gfp_flags.
 		 */
 
-		if (current->bio_list && !bio_list_empty(current->bio_list))
+		if (current->bio_list &&
+		    (!bio_list_empty(&current->bio_list[0]) ||
+		     !bio_list_empty(&current->bio_list[1])))
 			gfp_mask &= ~__GFP_DIRECT_RECLAIM;
 
 		p = mempool_alloc(bs->bio_pool, gfp_mask);
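
current->bio_list is now an array of two lists, so both must be scanned: the
rescuer punt (first hunk) drains each list in turn, and the allocation retry
(second hunk) drops __GFP_DIRECT_RECLAIM as soon as either list is non-empty,
since a pending bio could otherwise deadlock against the allocation. The
duplicated punt loop could be factored as in this sketch (helper hypothetical):

	static void sketch_punt_list(struct bio_list *list, struct bio_set *bs,
				     struct bio_list *punt)
	{
		struct bio_list nopunt;
		struct bio *bio;

		bio_list_init(&nopunt);
		while ((bio = bio_list_pop(list)))
			bio_list_add(bio->bi_pool == bs ? punt : &nopunt, bio);
		*list = nopunt;		/* keep only the non-punted bios */
	}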
diff --git a/block/blk-core.c b/block/blk-core.c
index df9e160..710c93b 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1996,7 +1996,14 @@
  */
 blk_qc_t generic_make_request(struct bio *bio)
 {
-	struct bio_list bio_list_on_stack;
+	/*
+	 * bio_list_on_stack[0] contains bios submitted by the current
+	 * make_request_fn.
+	 * bio_list_on_stack[1] contains bios that were submitted before
+	 * the current make_request_fn, but that haven't been processed
+	 * yet.
+	 */
+	struct bio_list bio_list_on_stack[2];
 	blk_qc_t ret = BLK_QC_T_NONE;
 
 	if (!generic_make_request_checks(bio))
@@ -2013,7 +2020,7 @@
 	 * should be added at the tail
 	 */
 	if (current->bio_list) {
-		bio_list_add(current->bio_list, bio);
+		bio_list_add(&current->bio_list[0], bio);
 		goto out;
 	}
 
@@ -2032,23 +2039,39 @@
 	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
-	bio_list_init(&bio_list_on_stack);
-	current->bio_list = &bio_list_on_stack;
+	bio_list_init(&bio_list_on_stack[0]);
+	current->bio_list = bio_list_on_stack;
 	do {
 		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 
 		if (likely(blk_queue_enter(q, false) == 0)) {
+			struct bio_list lower, same;
+
+			/* Create a fresh bio_list for all subordinate requests */
+			bio_list_on_stack[1] = bio_list_on_stack[0];
+			bio_list_init(&bio_list_on_stack[0]);
 			ret = q->make_request_fn(q, bio);
 
 			blk_queue_exit(q);
 
-			bio = bio_list_pop(current->bio_list);
+			/* sort new bios into those for a lower level
+			 * and those for the same level
+			 */
+			bio_list_init(&lower);
+			bio_list_init(&same);
+			while ((bio = bio_list_pop(&bio_list_on_stack[0])) != NULL)
+				if (q == bdev_get_queue(bio->bi_bdev))
+					bio_list_add(&same, bio);
+				else
+					bio_list_add(&lower, bio);
+			/* now assemble so we handle the lowest level first */
+			bio_list_merge(&bio_list_on_stack[0], &lower);
+			bio_list_merge(&bio_list_on_stack[0], &same);
+			bio_list_merge(&bio_list_on_stack[0], &bio_list_on_stack[1]);
 		} else {
-			struct bio *bio_next = bio_list_pop(current->bio_list);
-
 			bio_io_error(bio);
-			bio = bio_next;
 		}
+		bio = bio_list_pop(&bio_list_on_stack[0]);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
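
A condensed sketch of the reordering step above: bios emitted by the current
->make_request_fn are split by target queue so that requests for lower-level
(stacked) devices run before their siblings, and both run before the bios that
were already pending, which keeps the traversal effectively depth-first:

	static void sketch_reorder(struct bio_list on_stack[2],
				   struct request_queue *q)
	{
		struct bio_list lower, same;
		struct bio *bio;

		bio_list_init(&lower);
		bio_list_init(&same);
		while ((bio = bio_list_pop(&on_stack[0])) != NULL)
			bio_list_add(q == bdev_get_queue(bio->bi_bdev) ?
				     &same : &lower, bio);

		/* lowest level first, then same level, then older bios */
		bio_list_merge(&on_stack[0], &lower);
		bio_list_merge(&on_stack[0], &same);
		bio_list_merge(&on_stack[0], &on_stack[1]);
	}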
 
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 9ed0878..4c5678c 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -2,7 +2,6 @@
 # Makefile for the Linux ACPI interpreter
 #
 
-ccflags-y			:= -Os
 ccflags-$(CONFIG_ACPI_DEBUG)	+= -DACPI_DEBUG_OUTPUT
 
 #
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index b4c1a6a..03250e1 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -25,9 +25,11 @@
 ACPI_MODULE_NAME("platform");
 
 static const struct acpi_device_id forbidden_id_list[] = {
-	{"PNP0000", 0},	/* PIC */
-	{"PNP0100", 0},	/* Timer */
-	{"PNP0200", 0},	/* AT DMA Controller */
+	{"PNP0000",  0},	/* PIC */
+	{"PNP0100",  0},	/* Timer */
+	{"PNP0200",  0},	/* AT DMA Controller */
+	{"ACPI0009", 0},	/* IOxAPIC */
+	{"ACPI000A", 0},	/* IOAPIC */
 	{"", 0},
 };
 
diff --git a/drivers/android/Kconfig b/drivers/android/Kconfig
index a82fc02..4d4cdc1 100644
--- a/drivers/android/Kconfig
+++ b/drivers/android/Kconfig
@@ -22,7 +22,7 @@
 config ANDROID_BINDER_DEVICES
 	string "Android Binder devices"
 	depends on ANDROID_BINDER_IPC
-	default "binder"
+	default "binder,hwbinder,vndbinder"
 	---help---
 	  Default value for the binder.devices parameter.
 
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986..18cd3e9 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,7 @@
 	rng->hwrng.init = msm_rng_init,
 	rng->hwrng.cleanup = msm_rng_cleanup,
 	rng->hwrng.read = msm_rng_read,
+	rng->hwrng.quality = 700;
 
 	ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
 	if (ret) {
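
The quality field is the driver's estimate of true entropy in the RNG
bitstream (roughly, bits of entropy per 1024 bits of output), so 700 claims
about 68% entropy and lets the kernel credit this source accordingly. A
minimal registration sketch (struct name hypothetical):

	#include <linux/hw_random.h>

	static struct hwrng sketch_rng = {
		.name	 = "sketch-rng",
		.read	 = msm_rng_read,	/* as in the driver above */
		.quality = 700,			/* ~68% estimated entropy */
	};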
diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig
index cf874a1..7226dd3 100644
--- a/drivers/clk/qcom/Kconfig
+++ b/drivers/clk/qcom/Kconfig
@@ -224,3 +224,5 @@
 	  Support for the graphics clock controller on Qualcomm Technologies, Inc.
 	  sdm845 devices.
 	  Say Y if you want to support graphics controller devices.
+
+source "drivers/clk/qcom/mdss/Kconfig"
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 6e13562..1d042cd 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -38,3 +38,5 @@
 obj-$(CONFIG_MSM_MMCC_8974) += mmcc-msm8974.o
 obj-$(CONFIG_MSM_MMCC_8996) += mmcc-msm8996.o
 obj-$(CONFIG_MSM_VIDEOCC_SDM845) += videocc-sdm845.o
+
+obj-y += mdss/
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index d5e2be6..a358dd0 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -224,6 +224,7 @@
 	u32 osm_clk_rate;
 	u32 xo_clk_rate;
 	bool secure_init;
+	bool per_core_dcvs;
 	bool red_fsm_en;
 	bool boost_fsm_en;
 	bool safe_fsm_en;
@@ -449,6 +450,17 @@
 	return 0;
 }
 
+static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long *parent_rate)
+{
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+	if (!parent_hw)
+		return -EINVAL;
+
+	return clk_hw_round_rate(parent_hw, rate);
+}
+
 static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
 					unsigned long parent_rate)
 {
@@ -1536,8 +1548,16 @@
 	parent = to_clk_osm(clk_hw_get_parent(&c->hw));
 
 	spin_lock_irqsave(&parent->lock, flags);
-	val = clk_osm_read_reg_no_log(parent,
+	/*
+	 * Use core 0's copy as a proxy for the whole cluster when
+	 * per-core DCVS is disabled.
+	 */
+	if (parent->per_core_dcvs)
+		val = clk_osm_read_reg_no_log(parent,
 			OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
+	else
+		val = clk_osm_read_reg_no_log(parent,
+			OSM_CYCLE_COUNTER_STATUS_REG(0));
 
 	if (val < c->prev_cycle_counter) {
 		/* Handle counter overflow */
@@ -2503,8 +2523,10 @@
 	clk_osm_misc_programming(&pwrcl_clk);
 	clk_osm_misc_programming(&perfcl_clk);
 
-	if (of_property_read_bool(pdev->dev.of_node,
-				"qcom,enable-per-core-dcvs")) {
+	pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
+			of_property_read_bool(pdev->dev.of_node,
+				"qcom,enable-per-core-dcvs");
+	if (pwrcl_clk.per_core_dcvs) {
 		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
 		val |= BIT(0);
 		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
@@ -2516,6 +2538,7 @@
 
 	clk_ops_core = clk_dummy_ops;
 	clk_ops_core.set_rate = cpu_clk_set_rate;
+	clk_ops_core.round_rate = cpu_clk_round_rate;
 	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
 
 	spin_lock_init(&l3_clk.lock);
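
With .round_rate delegating to the parent OSM clock, consumers get rates
snapped to the parent's supported frequency table; a usage sketch (names
hypothetical):

	static int sketch_set_cpu_rate(struct clk *cpu_clk, unsigned long rate)
	{
		long r = clk_round_rate(cpu_clk, rate);	/* parent-backed */

		if (r < 0)
			return r;
		return clk_set_rate(cpu_clk, r);
	}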
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index d74db61..d30675c 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -113,6 +113,10 @@
 	"disp_cc_mdss_spdm_pclk1_clk",
 	"disp_cc_mdss_spdm_rot_clk",
 	"disp_cc_mdss_vsync_clk",
+	"measure_only_snoc_clk",
+	"measure_only_cnoc_clk",
+	"measure_only_bimc_clk",
+	"measure_only_ipa_2x_clk",
 	"gcc_aggre_noc_pcie_tbu_clk",
 	"gcc_aggre_ufs_card_axi_clk",
 	"gcc_aggre_ufs_phy_axi_clk",
@@ -444,6 +448,14 @@
 			0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
 			0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "measure_only_snoc_clk", 0x7, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "measure_only_cnoc_clk", 0x15, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "measure_only_bimc_clk", 0xc2, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "measure_only_ipa_2x_clk", 0x128, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_aggre_noc_pcie_tbu_clk", 0x2D, 4, GCC,
 			0x2D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_aggre_ufs_card_axi_clk", 0x11E, 4, GCC,
diff --git a/drivers/clk/qcom/dispcc-sdm845.c b/drivers/clk/qcom/dispcc-sdm845.c
index 6b1eca8..cb073a8 100644
--- a/drivers/clk/qcom/dispcc-sdm845.c
+++ b/drivers/clk/qcom/dispcc-sdm845.c
@@ -106,8 +106,8 @@
 static const char * const disp_cc_parent_names_3[] = {
 	"bi_tcxo",
 	"disp_cc_pll0",
-	"gpll0",
-	"gpll0",
+	"gcc_disp_gpll0_clk_src",
+	"gcc_disp_gpll0_div_clk_src",
 	"core_bi_pll_test_se",
 };
 
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index 08dce3f..2e8ef93 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -36,6 +36,7 @@
 
 #define GCC_APCS_CLOCK_SLEEP_ENA_VOTE_OFFSET	0x52008
 #define CPUSS_AHB_CLK_SLEEP_ENA			BIT(21)
+#define SYS_NOC_CPUSS_AHB_CLK_SLEEP_ENA		BIT(0)
 #define GCC_MMSS_MISC				0x09FFC
 #define GCC_GPU_MISC				0x71028
 
@@ -150,6 +151,38 @@
 	"core_bi_pll_test_se",
 };
 
+static struct clk_dummy measure_only_snoc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_snoc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy measure_only_cnoc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_cnoc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy measure_only_bimc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_bimc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy measure_only_ipa_2x_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_ipa_2x_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
 static struct pll_vco fabia_vco[] = {
 	{ 250000000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
@@ -2051,32 +2084,6 @@
 	},
 };
 
-static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
-	.halt_reg = 0x17014,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(9),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap0_core_2x_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_qupv3_wrap0_core_clk = {
-	.halt_reg = 0x1700c,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(8),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap0_core_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
 	.halt_reg = 0x17030,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -2221,32 +2228,6 @@
 	},
 };
 
-static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
-	.halt_reg = 0x18004,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(18),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap1_core_2x_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_qupv3_wrap1_core_clk = {
-	.halt_reg = 0x18008,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(19),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap1_core_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
 	.halt_reg = 0x18014,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -3146,6 +3127,13 @@
 	},
 };
 
+struct clk_hw *gcc_sdm845_hws[] = {
+	[MEASURE_ONLY_SNOC_CLK] = &measure_only_snoc_clk.hw,
+	[MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw,
+	[MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw,
+	[MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw,
+};
+
 static struct clk_regmap *gcc_sdm845_clocks[] = {
 	[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
 	[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
@@ -3218,8 +3206,6 @@
 	[GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
 	[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
 	[GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
-	[GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
-	[GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
 	[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
 	[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
 	[GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
@@ -3236,8 +3222,6 @@
 	[GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
 	[GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
 	[GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
-	[GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
-	[GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
 	[GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
 	[GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
 	[GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
@@ -3384,19 +3368,21 @@
 
 static int gcc_sdm845_probe(struct platform_device *pdev)
 {
+	struct clk *clk;
 	struct regmap *regmap;
-	int ret = 0;
+	int i, ret = 0;
 
 	regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
 	if (IS_ERR(regmap))
 		return PTR_ERR(regmap);
 
 	/*
-	 * Set the CPUSS_AHB_CLK_SLEEP_ENA bit to allow the cpuss_ahb_clk to be
+	 * Set the *_SLEEP_ENA bits to allow certain cpuss* clocks to be
+	 * turned off by hardware during certain apps low-power modes.
 	 */
 	regmap_update_bits(regmap, GCC_APCS_CLOCK_SLEEP_ENA_VOTE_OFFSET,
-		CPUSS_AHB_CLK_SLEEP_ENA, CPUSS_AHB_CLK_SLEEP_ENA);
+		CPUSS_AHB_CLK_SLEEP_ENA | SYS_NOC_CPUSS_AHB_CLK_SLEEP_ENA,
+		CPUSS_AHB_CLK_SLEEP_ENA | SYS_NOC_CPUSS_AHB_CLK_SLEEP_ENA);
 
 	vdd_cx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_cx");
 	if (IS_ERR(vdd_cx.regulator[0])) {
@@ -3414,6 +3400,13 @@
 		return PTR_ERR(vdd_cx_ao.regulator[0]);
 	}
 
+	/* Register the dummy measurement clocks */
+	for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+		clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+	}
+
 	ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
@@ -3424,8 +3417,9 @@
 	regmap_update_bits(regmap, GCC_MMSS_MISC, 0x3, 0x3);
 	regmap_update_bits(regmap, GCC_GPU_MISC, 0x3, 0x3);
 
-	/* Keep these HMSS clocks enabled always */
+	/* Keep these CPUSS clocks enabled always */
 	clk_prepare_enable(gcc_cpuss_ahb_clk.clkr.hw.clk);
+	clk_prepare_enable(gcc_sys_noc_cpuss_ahb_clk.clkr.hw.clk);
 	clk_prepare_enable(gcc_cpuss_dvm_bus_clk.clkr.hw.clk);
 	clk_prepare_enable(gcc_cpuss_gnoc_clk.clkr.hw.clk);
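
The measure-only clocks are rate-less placeholders that exist solely so the
debug clock controller can mux them in for measurement; registering them is
just a loop over the clk_hw table, as in this trimmed sketch:

	static int sketch_register_hws(struct platform_device *pdev,
				       struct clk_hw **hws, int num)
	{
		int i;

		for (i = 0; i < num; i++) {
			struct clk *clk = devm_clk_register(&pdev->dev,
							    hws[i]);

			if (IS_ERR(clk))
				return PTR_ERR(clk);
		}
		return 0;
	}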
 
diff --git a/drivers/clk/qcom/gpucc-sdm845.c b/drivers/clk/qcom/gpucc-sdm845.c
index a5a7488..d9a626e 100644
--- a/drivers/clk/qcom/gpucc-sdm845.c
+++ b/drivers/clk/qcom/gpucc-sdm845.c
@@ -80,8 +80,8 @@
 	"bi_tcxo",
 	"gpu_cc_pll0",
 	"gpu_cc_pll1",
-	"gpll0",
-	"gpll0_out_even",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
 	"core_bi_pll_test_se",
 };
 
@@ -101,7 +101,7 @@
 	"gpu_cc_pll0_out_odd",
 	"gpu_cc_pll1_out_even",
 	"gpu_cc_pll1_out_odd",
-	"gpll0",
+	"gcc_gpu_gpll0_clk_src",
 	"core_bi_pll_test_se",
 };
 
@@ -114,8 +114,8 @@
 
 static const char * const gpu_cc_parent_names_2[] = {
 	"bi_tcxo",
-	"gpll0",
-	"gpll0",
+	"gcc_gpu_gpll0_clk_src",
+	"gcc_gpu_gpll0_div_clk_src",
 	"core_bi_pll_test_se",
 };
 
@@ -211,9 +211,9 @@
 	.cmd_rcgr = 0x101c,
 	.mnd_width = 0,
 	.hid_width = 5,
-	.enable_safe_config = true,
 	.parent_map = gpu_cc_parent_map_1,
 	.freq_tbl = ftbl_gpu_cc_gx_gfx3d_clk_src,
+	.flags = FORCE_ENABLE_RCG,
 	.clkr.hw.init = &(struct clk_init_data){
 		.name = "gpu_cc_gx_gfx3d_clk_src",
 		.parent_names = gpu_cc_parent_names_1,
diff --git a/drivers/clk/qcom/mdss/Kconfig b/drivers/clk/qcom/mdss/Kconfig
index 229780e..7213e37 100644
--- a/drivers/clk/qcom/mdss/Kconfig
+++ b/drivers/clk/qcom/mdss/Kconfig
@@ -1,5 +1,6 @@
-config MSM_MDSS_PLL
+config QCOM_MDSS_PLL
 	bool "MDSS pll programming"
+	depends on COMMON_CLK_QCOM
 	---help---
 	It provides support for DSI, eDP and HDMI interface pll programming on MDSS
 	hardware. It also handles the pll specific resources and turn them on/off when
diff --git a/drivers/clk/qcom/mdss/Makefile b/drivers/clk/qcom/mdss/Makefile
index 64c7609..d183393 100644
--- a/drivers/clk/qcom/mdss/Makefile
+++ b/drivers/clk/qcom/mdss/Makefile
@@ -1,9 +1,3 @@
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-pll.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8996-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dsi-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-dp-pll-8998-util.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8996.o
-obj-$(CONFIG_MSM_MDSS_PLL) += mdss-hdmi-pll-8998.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll-util.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-pll.o
+obj-$(CONFIG_QCOM_MDSS_PLL) += mdss-dsi-pll-10nm.o
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
similarity index 62%
rename from drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
rename to drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
index 8c6bc2c..6ce0d76 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll-8998.c
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll-10nm.c
@@ -17,14 +17,9 @@
 #include <linux/err.h>
 #include <linux/iopoll.h>
 #include <linux/delay.h>
-#include <linux/clk/msm-clk-provider.h>
-#include <linux/clk/msm-clk.h>
-#include <linux/clk/msm-clock-generic.h>
-#include <dt-bindings/clock/msm-clocks-8998.h>
-
-#include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
 #include "mdss-pll.h"
+#include <dt-bindings/clock/mdss-10nm-pll-clk.h>
 
 #define VCO_DELAY_USEC 1
 
@@ -128,14 +123,14 @@
 	u32 refclk_cycles;
 };
 
-struct dsi_pll_8998 {
+struct dsi_pll_10nm {
 	struct mdss_pll_resources *rsc;
 	struct dsi_pll_config pll_configuration;
 	struct dsi_pll_regs reg_setup;
 };
 
 static struct mdss_pll_resources *pll_rsc_db[DSI_PLL_MAX];
-static struct dsi_pll_8998 plls[DSI_PLL_MAX];
+static struct dsi_pll_10nm plls[DSI_PLL_MAX];
 
 static void dsi_pll_config_slave(struct mdss_pll_resources *rsc)
 {
@@ -166,7 +161,7 @@
 	pr_debug("Slave PLL %s\n", rsc->slave ? "configured" : "absent");
 }
 
-static void dsi_pll_setup_config(struct dsi_pll_8998 *pll,
+static void dsi_pll_setup_config(struct dsi_pll_10nm *pll,
 				 struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
@@ -198,14 +193,14 @@
 	dsi_pll_config_slave(rsc);
 }
 
-static void dsi_pll_calc_dec_frac(struct dsi_pll_8998 *pll,
+static void dsi_pll_calc_dec_frac(struct dsi_pll_10nm *pll,
 				  struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
 	struct dsi_pll_regs *regs = &pll->reg_setup;
 	u64 target_freq;
 	u64 fref = rsc->vco_ref_clk_rate;
-	u32 computed_output_div, div_log;
+	u32 computed_output_div, div_log = 0;
 	u64 pll_freq;
 	u64 divider;
 	u64 dec, dec_multiple;
@@ -262,7 +257,7 @@
 	regs->frac_div_start_high = (frac & 0x30000) >> 16;
 }
 
-static void dsi_pll_calc_ssc(struct dsi_pll_8998 *pll,
+static void dsi_pll_calc_ssc(struct dsi_pll_10nm *pll,
 		  struct mdss_pll_resources *rsc)
 {
 	struct dsi_pll_config *config = &pll->pll_configuration;
@@ -307,7 +302,7 @@
 			ssc_per, (u32)ssc_step_size, config->ssc_adj_per);
 }
 
-static void dsi_pll_ssc_commit(struct dsi_pll_8998 *pll,
+static void dsi_pll_ssc_commit(struct dsi_pll_10nm *pll,
 		struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -333,7 +328,7 @@
 	}
 }
 
-static void dsi_pll_config_hzindep_reg(struct dsi_pll_8998 *pll,
+static void dsi_pll_config_hzindep_reg(struct dsi_pll_10nm *pll,
 				  struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -357,7 +352,7 @@
 	MDSS_PLL_REG_W(pll_base, PLL_PLL_LOCK_OVERRIDE, 0x80);
 }
 
-static void dsi_pll_commit(struct dsi_pll_8998 *pll,
+static void dsi_pll_commit(struct dsi_pll_10nm *pll,
 			   struct mdss_pll_resources *rsc)
 {
 	void __iomem *pll_base = rsc->pll_base;
@@ -378,12 +373,13 @@
 
 }
 
-static int vco_8998_set_rate(struct clk *c, unsigned long rate)
+static int vco_10nm_set_rate(struct clk_hw *hw, unsigned long rate,
+			unsigned long parent_rate)
 {
 	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *rsc = vco->priv;
-	struct dsi_pll_8998 *pll;
+	struct dsi_pll_10nm *pll;
 
 	if (!rsc) {
 		pr_err("pll resource not found\n");
@@ -431,7 +427,7 @@
 	return 0;
 }
 
-static int dsi_pll_8998_lock_status(struct mdss_pll_resources *pll)
+static int dsi_pll_10nm_lock_status(struct mdss_pll_resources *pll)
 {
 	int rc;
 	u32 status;
@@ -487,7 +483,7 @@
 	wmb();
 
 	/* Check for PLL lock */
-	rc = dsi_pll_8998_lock_status(rsc);
+	rc = dsi_pll_10nm_lock_status(rsc);
 	if (rc) {
 		pr_err("PLL(%d) lock failed\n", rsc->index);
 		goto error;
@@ -532,9 +528,25 @@
 	rsc->pll_on = false;
 }
 
-static void vco_8998_unprepare(struct clk *c)
+long vco_10nm_round_rate(struct clk_hw *hw, unsigned long rate,
+				unsigned long *parent_rate)
 {
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	unsigned long rrate = rate;
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
+
+	if (rate < vco->min_rate)
+		rrate = vco->min_rate;
+	if (rate > vco->max_rate)
+		rrate = vco->max_rate;
+
+	*parent_rate = rrate;
+
+	return rrate;
+}
+
+static void vco_10nm_unprepare(struct clk_hw *hw)
+{
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 
 	if (!pll) {
@@ -542,15 +554,15 @@
 		return;
 	}
 
-	pll->vco_cached_rate = c->rate;
+	pll->vco_cached_rate = clk_hw_get_rate(hw);
 	dsi_pll_disable(vco);
 	mdss_pll_resource_enable(pll, false);
 }
 
-static int vco_8998_prepare(struct clk *c)
+static int vco_10nm_prepare(struct clk_hw *hw)
 {
 	int rc = 0;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 
 	if (!pll) {
@@ -566,8 +578,9 @@
 	}
 
 	if ((pll->vco_cached_rate != 0) &&
-	    (pll->vco_cached_rate == c->rate)) {
-		rc = c->ops->set_rate(c, pll->vco_cached_rate);
+	    (pll->vco_cached_rate == clk_hw_get_rate(hw))) {
+		rc = hw->init->ops->set_rate(hw, pll->vco_cached_rate,
+				pll->vco_cached_rate);
 		if (rc) {
 			pr_err("pll(%d) set_rate failed, rc=%d\n",
 			       pll->index, rc);
@@ -586,9 +599,10 @@
 	return rc;
 }
 
-static unsigned long dsi_pll_get_vco_rate(struct clk *c)
+static unsigned long vco_10nm_recalc_rate(struct clk_hw *hw,
+						unsigned long parent_rate)
 {
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
+	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);
 	struct mdss_pll_resources *pll = vco->priv;
 	int rc;
 	u64 ref_clk = vco->ref_clk_rate;
@@ -642,46 +656,11 @@
 	return (unsigned long)vco_rate;
 }
 
-enum handoff vco_8998_handoff(struct clk *c)
-{
-	enum handoff ret = HANDOFF_DISABLED_CLK;
-	int rc;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-	struct mdss_pll_resources *pll = vco->priv;
-	u32 status;
-
-	if (!pll) {
-		pr_err("Unable to find pll resource\n");
-		return HANDOFF_DISABLED_CLK;
-	}
-
-	rc = mdss_pll_resource_enable(pll, true);
-	if (rc) {
-		pr_err("failed to enable pll(%d) resources, rc=%d\n",
-		       pll->index, rc);
-		return ret;
-	}
-
-	status = MDSS_PLL_REG_R(pll->pll_base, PLL_COMMON_STATUS_ONE);
-	if (status & BIT(0)) {
-		pll->handoff_resources = true;
-		pll->pll_on = true;
-		c->rate = dsi_pll_get_vco_rate(c);
-		ret = HANDOFF_ENABLED_CLK;
-	} else {
-		(void)mdss_pll_resource_enable(pll, false);
-		ret = HANDOFF_DISABLED_CLK;
-	}
-
-	return ret;
-}
-
-static int pixel_clk_get_div(struct div_clk *clk)
+static int pixel_clk_get_div(void *context, unsigned int reg, unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -690,11 +669,16 @@
 	}
 
 	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	div = (reg_val & 0xF0) >> 4;
+	*div = (reg_val & 0xF0) >> 4;
+
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static void pixel_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -707,16 +691,18 @@
 	MDSS_PLL_REG_W(pll->phy_base, PHY_CMN_CLK_CFG0, reg_val);
 }
 
-static int pixel_clk_set_div(struct div_clk *clk, int div)
+static int pixel_clk_set_div(void *context, unsigned int reg, unsigned int div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
 		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
 		return rc;
 	}
+	/* In the common clock framework, the divider value provided is one less */
+	div++;
 
 	pixel_clk_set_div_sub(pll, div);
 	if (pll->slave)
@@ -727,12 +713,11 @@
 	return 0;
 }
 
-static int bit_clk_get_div(struct div_clk *clk)
+static int bit_clk_get_div(void *context, unsigned int reg, unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -741,11 +726,17 @@
 	}
 
 	reg_val = MDSS_PLL_REG_R(pll->phy_base, PHY_CMN_CLK_CFG0);
-	div = (reg_val & 0x0F);
+	*div = (reg_val & 0x0F);
+
+	/* The common clock framework will add one to the divider value sent */
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static void bit_clk_set_div_sub(struct mdss_pll_resources *rsc, int div)
@@ -758,10 +749,10 @@
 	MDSS_PLL_REG_W(rsc->phy_base, PHY_CMN_CLK_CFG0, reg_val);
 }
 
-static int bit_clk_set_div(struct div_clk *clk, int div)
+static int bit_clk_set_div(void *context, unsigned int reg, unsigned int div)
 {
 	int rc;
-	struct mdss_pll_resources *rsc = clk->priv;
+	struct mdss_pll_resources *rsc = context;
 	struct dsi_pll_8998 *pll;
 
 	if (!rsc) {
@@ -780,6 +771,7 @@
 		pr_err("Failed to enable dsi pll resources, rc=%d\n", rc);
 		return rc;
 	}
+	div++;
 
 	bit_clk_set_div_sub(rsc, div);
 	/* For slave PLL, this divider always should be set to 1 */
@@ -791,12 +783,12 @@
 	return rc;
 }
 
-static int post_vco_clk_get_div(struct div_clk *clk)
+static int post_vco_clk_get_div(void *context, unsigned int reg,
+			unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -808,15 +800,20 @@
 	reg_val &= 0x3;
 
 	if (reg_val == 2)
-		div = 1;
+		*div = 1;
 	else if (reg_val == 3)
-		div = 4;
+		*div = 4;
 	else
-		div = 1;
+		*div = 1;
+
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static int post_vco_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -842,10 +839,11 @@
 	return rc;
 }
 
-static int post_vco_clk_set_div(struct div_clk *clk, int div)
+static int post_vco_clk_set_div(void *context, unsigned int reg,
+		unsigned int div)
 {
 	int rc = 0;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -853,6 +851,8 @@
 		return rc;
 	}
 
+	div++;
+
 	rc = post_vco_clk_set_div_sub(pll, div);
 	if (!rc && pll->slave)
 		rc = post_vco_clk_set_div_sub(pll->slave, div);
@@ -862,12 +862,12 @@
 	return rc;
 }
 
-static int post_bit_clk_get_div(struct div_clk *clk)
+static int post_bit_clk_get_div(void *context, unsigned int reg,
+			unsigned int *div)
 {
 	int rc;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 	u32 reg_val;
-	int div;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -879,15 +879,20 @@
 	reg_val &= 0x3;
 
 	if (reg_val == 0)
-		div = 1;
+		*div = 1;
 	else if (reg_val == 1)
-		div = 2;
+		*div = 2;
 	else
-		div = 1;
+		*div = 1;
+
+	if (*div == 0)
+		*div = 1;
+	else
+		*div -= 1;
 
 	(void)mdss_pll_resource_enable(pll, false);
 
-	return div;
+	return rc;
 }
 
 static int post_bit_clk_set_div_sub(struct mdss_pll_resources *pll, int div)
@@ -913,10 +918,11 @@
 	return rc;
 }
 
-static int post_bit_clk_set_div(struct div_clk *clk, int div)
+static int post_bit_clk_set_div(void *context, unsigned int reg,
+		unsigned int div)
 {
 	int rc = 0;
-	struct mdss_pll_resources *pll = clk->priv;
+	struct mdss_pll_resources *pll = context;
 
 	rc = mdss_pll_resource_enable(pll, true);
 	if (rc) {
@@ -924,6 +930,8 @@
 		return rc;
 	}
 
+	div++;
+
 	rc = post_bit_clk_set_div_sub(pll, div);
 	if (!rc && pll->slave)
 		rc = post_bit_clk_set_div_sub(pll->slave, div);
@@ -933,57 +941,44 @@
 	return rc;
 }
 
-long vco_8998_round_rate(struct clk *c, unsigned long rate)
-{
-	unsigned long rrate = rate;
-	struct dsi_pll_vco_clk *vco = to_vco_clk(c);
-
-	if (rate < vco->min_rate)
-		rrate = vco->min_rate;
-	if (rate > vco->max_rate)
-		rrate = vco->max_rate;
-
-	return rrate;
-}
-
-/* clk ops that require runtime fixup */
-static const struct clk_ops clk_ops_gen_mux_dsi;
-static const struct clk_ops clk_ops_bitclk_src_c;
-static const struct clk_ops clk_ops_post_vco_div_c;
-static const struct clk_ops clk_ops_post_bit_div_c;
-static const struct clk_ops clk_ops_pclk_src_c;
-
-static struct clk_div_ops clk_post_vco_div_ops = {
-	.set_div = post_vco_clk_set_div,
-	.get_div = post_vco_clk_get_div,
+static struct regmap_config dsi_pll_10nm_config = {
+	.reg_bits = 32,
+	.reg_stride = 4,
+	.val_bits = 32,
+	.max_register = 0x7c0,
 };
 
-static struct clk_div_ops clk_post_bit_div_ops = {
-	.set_div = post_bit_clk_set_div,
-	.get_div = post_bit_clk_get_div,
+static struct regmap_bus post_vco_regmap_bus = {
+	.reg_write = post_vco_clk_set_div,
+	.reg_read = post_vco_clk_get_div,
 };
 
-static struct clk_div_ops pixel_clk_div_ops = {
-	.set_div = pixel_clk_set_div,
-	.get_div = pixel_clk_get_div,
+static struct regmap_bus post_bit_regmap_bus = {
+	.reg_write = post_bit_clk_set_div,
+	.reg_read = post_bit_clk_get_div,
 };
 
-static struct clk_div_ops clk_bitclk_src_ops = {
-	.set_div = bit_clk_set_div,
-	.get_div = bit_clk_get_div,
+static struct regmap_bus pclk_src_regmap_bus = {
+	.reg_write = pixel_clk_set_div,
+	.reg_read = pixel_clk_get_div,
 };
 
-static const struct clk_ops clk_ops_vco_8998 = {
-	.set_rate = vco_8998_set_rate,
-	.round_rate = vco_8998_round_rate,
-	.handoff = vco_8998_handoff,
-	.prepare = vco_8998_prepare,
-	.unprepare = vco_8998_unprepare,
+static struct regmap_bus bitclk_src_regmap_bus = {
+	.reg_write = bit_clk_set_div,
+	.reg_read = bit_clk_get_div,
 };
 
-static struct clk_mux_ops mdss_mux_ops = {
-	.set_mux_sel = mdss_set_mux_sel,
-	.get_mux_sel = mdss_get_mux_sel,
+static const struct clk_ops clk_ops_vco_10nm = {
+	.recalc_rate = vco_10nm_recalc_rate,
+	.set_rate = vco_10nm_set_rate,
+	.round_rate = vco_10nm_round_rate,
+	.prepare = vco_10nm_prepare,
+	.unprepare = vco_10nm_unprepare,
+};
+
+static struct regmap_bus mdss_mux_regmap_bus = {
+	.reg_write = mdss_set_mux_sel,
+	.reg_read = mdss_get_mux_sel,
 };
 
 /*
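
All of the divider callbacks above follow the common clock framework's
convention that a hardware divider of N is expressed as the value N - 1,
hence the div++ before programming and the decrement after reading. A sketch
of the two conversions (helper names hypothetical):

	static unsigned int sketch_hw_to_ccf(unsigned int hw_div)
	{
		return hw_div ? hw_div - 1 : 1;	/* mirrors the *_get_div paths */
	}

	static unsigned int sketch_ccf_to_hw(unsigned int ccf_val)
	{
		return ccf_val + 1;		/* mirrors the *_set_div paths */
	}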
@@ -1039,303 +1034,296 @@
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1500000000UL,
 	.max_rate = 3500000000UL,
-	.c = {
-		.dbg_name = "dsi0pll_vco_clk",
-		.ops = &clk_ops_vco_8998,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_vco_clk.c),
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_vco_clk",
+			.parent_names = (const char *[]){"xo_board"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
 	},
 };
 
-static struct div_clk dsi0pll_bitclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &clk_bitclk_src_ops,
-	.c = {
-		.parent = &dsi0pll_vco_clk.c,
-		.dbg_name = "dsi0pll_bitclk_src",
-		.ops = &clk_ops_bitclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_bitclk_src.c),
-	}
-};
-
-static struct div_clk dsi0pll_post_vco_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 4,
-	},
-	.ops = &clk_post_vco_div_ops,
-	.c = {
-		.parent = &dsi0pll_vco_clk.c,
-		.dbg_name = "dsi0pll_post_vco_div",
-		.ops = &clk_ops_post_vco_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_post_vco_div.c),
-	}
-};
-
-static struct div_clk dsi0pll_post_bit_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 2,
-	},
-	.ops = &clk_post_bit_div_ops,
-	.c = {
-		.parent = &dsi0pll_bitclk_src.c,
-		.dbg_name = "dsi0pll_post_bit_div",
-		.ops = &clk_ops_post_bit_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_post_bit_div.c),
-	}
-};
-
-static struct mux_clk dsi0pll_pclk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_post_bit_div.c, 0},
-		{&dsi0pll_post_vco_div.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_post_bit_div.c,
-		.dbg_name = "dsi0pll_pclk_src_mux",
-		.ops = &clk_ops_gen_mux,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_src_mux.c),
-	}
-};
-
-static struct div_clk dsi0pll_pclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &pixel_clk_div_ops,
-	.c = {
-		.parent = &dsi0pll_pclk_src_mux.c,
-		.dbg_name = "dsi0pll_pclk_src",
-		.ops = &clk_ops_pclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_pclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_pclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_pclk_src.c,
-		.dbg_name = "dsi0pll_pclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_pclk_mux.c),
-	}
-};
-
-static struct div_clk dsi0pll_byteclk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi0pll_bitclk_src.c,
-		.dbg_name = "dsi0pll_byteclk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_byteclk_src.c),
-	},
-};
-
-static struct mux_clk dsi0pll_byteclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi0pll_byteclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi0pll_byteclk_src.c,
-		.dbg_name = "dsi0pll_byteclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi0pll_byteclk_mux.c),
-	}
-};
-
 static struct dsi_pll_vco_clk dsi1pll_vco_clk = {
 	.ref_clk_rate = 19200000UL,
 	.min_rate = 1500000000UL,
 	.max_rate = 3500000000UL,
-	.c = {
-		.dbg_name = "dsi1pll_vco_clk",
-		.ops = &clk_ops_vco_8998,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_vco_clk.c),
+	.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_vco_clk",
+			.parent_names = (const char *[]){"xo_board"},
+			.num_parents = 1,
+			.ops = &clk_ops_vco_10nm,
+			.flags = CLK_GET_RATE_NOCACHE,
 	},
 };
 
-static struct div_clk dsi1pll_bitclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &clk_bitclk_src_ops,
-	.c = {
-		.parent = &dsi1pll_vco_clk.c,
-		.dbg_name = "dsi1pll_bitclk_src",
-		.ops = &clk_ops_bitclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_bitclk_src.c),
-	}
-};
-
-static struct div_clk dsi1pll_post_vco_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 4,
-	},
-	.ops = &clk_post_vco_div_ops,
-	.c = {
-		.parent = &dsi1pll_vco_clk.c,
-		.dbg_name = "dsi1pll_post_vco_div",
-		.ops = &clk_ops_post_vco_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_post_vco_div.c),
-	}
-};
-
-static struct div_clk dsi1pll_post_bit_div = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 2,
-	},
-	.ops = &clk_post_bit_div_ops,
-	.c = {
-		.parent = &dsi1pll_bitclk_src.c,
-		.dbg_name = "dsi1pll_post_bit_div",
-		.ops = &clk_ops_post_bit_div_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_post_bit_div.c),
-	}
-};
-
-static struct mux_clk dsi1pll_pclk_src_mux = {
-	.num_parents = 2,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_post_bit_div.c, 0},
-		{&dsi1pll_post_vco_div.c, 1},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_post_bit_div.c,
-		.dbg_name = "dsi1pll_pclk_src_mux",
-		.ops = &clk_ops_gen_mux,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_src_mux.c),
-	}
-};
-
-static struct div_clk dsi1pll_pclk_src = {
-	.data = {
-		.div = 1,
-		.min_div = 1,
-		.max_div = 15,
-	},
-	.ops = &pixel_clk_div_ops,
-	.c = {
-		.parent = &dsi1pll_pclk_src_mux.c,
-		.dbg_name = "dsi1pll_pclk_src",
-		.ops = &clk_ops_pclk_src_c,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_src.c),
+static struct clk_regmap_div dsi0pll_bitclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
 	},
 };
 
-static struct mux_clk dsi1pll_pclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_pclk_src.c, 0},
-	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_pclk_src.c,
-		.dbg_name = "dsi1pll_pclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_pclk_mux.c),
-	}
-};
-
-static struct div_clk dsi1pll_byteclk_src = {
-	.data = {
-		.div = 8,
-		.min_div = 8,
-		.max_div = 8,
-	},
-	.c = {
-		.parent = &dsi1pll_bitclk_src.c,
-		.dbg_name = "dsi1pll_byteclk_src",
-		.ops = &clk_ops_div,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_byteclk_src.c),
+static struct clk_regmap_div dsi1pll_bitclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_bitclk_src",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
 	},
 };
 
-static struct mux_clk dsi1pll_byteclk_mux = {
-	.num_parents = 1,
-	.parents = (struct clk_src[]) {
-		{&dsi1pll_byteclk_src.c, 0},
+static struct clk_regmap_div dsi0pll_post_vco_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_post_vco_div",
+			.parent_names = (const char *[]){"dsi0pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
 	},
-	.ops = &mdss_mux_ops,
-	.c = {
-		.parent = &dsi1pll_byteclk_src.c,
-		.dbg_name = "dsi1pll_byteclk_mux",
-		.ops = &clk_ops_gen_mux_dsi,
-		.flags = CLKFLAG_NO_RATE_CACHE,
-		CLK_INIT(dsi1pll_byteclk_mux.c),
-	}
 };
 
-static struct clk_lookup mdss_dsi_pll0cc_8998[] = {
-	CLK_LIST(dsi0pll_byteclk_mux),
-	CLK_LIST(dsi0pll_byteclk_src),
-	CLK_LIST(dsi0pll_pclk_mux),
-	CLK_LIST(dsi0pll_pclk_src),
-	CLK_LIST(dsi0pll_pclk_src_mux),
-	CLK_LIST(dsi0pll_post_bit_div),
-	CLK_LIST(dsi0pll_post_vco_div),
-	CLK_LIST(dsi0pll_bitclk_src),
-	CLK_LIST(dsi0pll_vco_clk),
-};
-static struct clk_lookup mdss_dsi_pll1cc_8998[] = {
-	CLK_LIST(dsi1pll_byteclk_mux),
-	CLK_LIST(dsi1pll_byteclk_src),
-	CLK_LIST(dsi1pll_pclk_mux),
-	CLK_LIST(dsi1pll_pclk_src),
-	CLK_LIST(dsi1pll_pclk_src_mux),
-	CLK_LIST(dsi1pll_post_bit_div),
-	CLK_LIST(dsi1pll_post_vco_div),
-	CLK_LIST(dsi1pll_bitclk_src),
-	CLK_LIST(dsi1pll_vco_clk),
+static struct clk_regmap_div dsi1pll_post_vco_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_post_vco_div",
+			.parent_names = (const char *[]){"dsi1pll_vco_clk"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
 };
 
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
+static struct clk_fixed_factor dsi0pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi0pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_fixed_factor dsi1pll_byteclk_src = {
+	.div = 8,
+	.mult = 1,
+	.hw.init = &(struct clk_init_data){
+		.name = "dsi1pll_byteclk_src",
+		.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+		.num_parents = 1,
+		.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+		.ops = &clk_fixed_factor_ops,
+	},
+};
+
+static struct clk_regmap_div dsi0pll_post_bit_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_post_bit_div",
+			.parent_names = (const char *[]){"dsi0pll_bitclk_src"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_post_bit_div = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_post_bit_div",
+			.parent_names = (const char *[]){"dsi1pll_bitclk_src"},
+			.num_parents = 1,
+			.flags = CLK_GET_RATE_NOCACHE,
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_byteclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_byteclk_mux",
+			.parent_names = (const char *[]){"dsi0pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_byteclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_byteclk_mux",
+			.parent_names = (const char *[]){"dsi1pll_byteclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_src_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi0pll_post_bit_div",
+						"dsi0pll_post_bit_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_src_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src_mux",
+			.parent_names = (const char *[]){"dsi1pll_post_bit_div",
+						"dsi1pll_post_bit_div"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi0pll_pclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi0pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_div dsi1pll_pclk_src = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_src",
+			.parent_names = (const char *[]){
+					"dsi1pll_pclk_src_mux"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_div_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi0pll_pclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi0pll_pclk_mux",
+			.parent_names = (const char *[]){"dsi0pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_regmap_mux dsi1pll_pclk_mux = {
+	.reg = 0x48,
+	.shift = 0,
+	.width = 4,
+	.clkr = {
+		.hw.init = &(struct clk_init_data){
+			.name = "dsi1pll_pclk_mux",
+			.parent_names = (const char *[]){"dsi1pll_pclk_src"},
+			.num_parents = 1,
+			.flags = (CLK_GET_RATE_NOCACHE | CLK_SET_RATE_PARENT),
+			.ops = &clk_regmap_mux_closest_ops,
+		},
+	},
+};
+
+static struct clk_hw *mdss_dsi_pllcc_10nm[] = {
+	[VCO_CLK_0] = &dsi0pll_vco_clk.hw,
+	[BITCLK_SRC_0_CLK] = &dsi0pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_0_CLK] = &dsi0pll_byteclk_src.hw,
+	[POST_BIT_DIV_0_CLK] = &dsi0pll_post_bit_div.clkr.hw,
+	[POST_VCO_DIV_0_CLK] = &dsi0pll_post_vco_div.clkr.hw,
+	[BYTECLK_MUX_0_CLK] = &dsi0pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_0_CLK] = &dsi0pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_0_CLK] = &dsi0pll_pclk_src.clkr.hw,
+	[PCLK_MUX_0_CLK] = &dsi0pll_pclk_mux.clkr.hw,
+	[VCO_CLK_1] = &dsi1pll_vco_clk.hw,
+	[BITCLK_SRC_1_CLK] = &dsi1pll_bitclk_src.clkr.hw,
+	[BYTECLK_SRC_1_CLK] = &dsi1pll_byteclk_src.hw,
+	[POST_BIT_DIV_1_CLK] = &dsi1pll_post_bit_div.clkr.hw,
+	[POST_VCO_DIV_1_CLK] = &dsi1pll_post_vco_div.clkr.hw,
+	[BYTECLK_MUX_1_CLK] = &dsi1pll_byteclk_mux.clkr.hw,
+	[PCLK_SRC_MUX_1_CLK] = &dsi1pll_pclk_src_mux.clkr.hw,
+	[PCLK_SRC_1_CLK] = &dsi1pll_pclk_src.clkr.hw,
+	[PCLK_MUX_1_CLK] = &dsi1pll_pclk_mux.clkr.hw,
+};
+
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
 				  struct mdss_pll_resources *pll_res)
 {
-	int rc = 0, ndx;
+	int rc = 0, ndx, i;
+	struct clk *clk;
+	struct clk_onecell_data *clk_data;
+	int num_clks = ARRAY_SIZE(mdss_dsi_pllcc_10nm);
+	struct regmap *rmap;
 
 	if (!pdev || !pdev->dev.of_node ||
 		!pll_res || !pll_res->pll_base || !pll_res->phy_base) {
@@ -1353,62 +1341,120 @@
 	pll_rsc_db[ndx] = pll_res;
 	pll_res->priv = &plls[ndx];
 	plls[ndx].rsc = pll_res;
-
-	/* runtime fixup of all div and mux clock ops */
-	clk_ops_gen_mux_dsi = clk_ops_gen_mux;
-	clk_ops_gen_mux_dsi.round_rate = parent_round_rate;
-	clk_ops_gen_mux_dsi.set_rate = parent_set_rate;
-
-	clk_ops_bitclk_src_c = clk_ops_div;
-	clk_ops_bitclk_src_c.prepare = mdss_pll_div_prepare;
-
-	/*
-	 * Set the ops for the two dividers in the pixel clock tree to the
-	 * slave_div to ensure that a set rate on this divider clock will not
-	 * be propagated to it's parent. This is needed ensure that when we set
-	 * the rate for pixel clock, the vco is not reconfigured
-	 */
-	clk_ops_post_vco_div_c = clk_ops_slave_div;
-	clk_ops_post_vco_div_c.prepare = mdss_pll_div_prepare;
-
-	clk_ops_post_bit_div_c = clk_ops_slave_div;
-	clk_ops_post_bit_div_c.prepare = mdss_pll_div_prepare;
-
-	clk_ops_pclk_src_c = clk_ops_div;
-	clk_ops_pclk_src_c.prepare = mdss_pll_div_prepare;
-
 	pll_res->vco_delay = VCO_DELAY_USEC;
-	if (ndx == 0) {
-		dsi0pll_byteclk_mux.priv = pll_res;
-		dsi0pll_byteclk_src.priv = pll_res;
-		dsi0pll_pclk_mux.priv = pll_res;
-		dsi0pll_pclk_src.priv = pll_res;
-		dsi0pll_pclk_src_mux.priv = pll_res;
-		dsi0pll_post_bit_div.priv = pll_res;
-		dsi0pll_post_vco_div.priv = pll_res;
-		dsi0pll_bitclk_src.priv = pll_res;
-		dsi0pll_vco_clk.priv = pll_res;
 
-		rc = of_msm_clock_register(pdev->dev.of_node,
-			mdss_dsi_pll0cc_8998,
-			ARRAY_SIZE(mdss_dsi_pll0cc_8998));
-	} else {
-		dsi1pll_byteclk_mux.priv = pll_res;
-		dsi1pll_byteclk_src.priv = pll_res;
-		dsi1pll_pclk_mux.priv = pll_res;
-		dsi1pll_pclk_src.priv = pll_res;
-		dsi1pll_pclk_src_mux.priv = pll_res;
-		dsi1pll_post_bit_div.priv = pll_res;
-		dsi1pll_post_vco_div.priv = pll_res;
-		dsi1pll_bitclk_src.priv = pll_res;
-		dsi1pll_vco_clk.priv = pll_res;
+	clk_data = devm_kzalloc(&pdev->dev, sizeof(struct clk_onecell_data),
+					GFP_KERNEL);
+	if (!clk_data)
+		return -ENOMEM;
 
-		rc = of_msm_clock_register(pdev->dev.of_node,
-			mdss_dsi_pll1cc_8998,
-			ARRAY_SIZE(mdss_dsi_pll1cc_8998));
+	clk_data->clks = devm_kzalloc(&pdev->dev, (num_clks *
+				sizeof(struct clk *)), GFP_KERNEL);
+	if (!clk_data->clks) {
+		devm_kfree(&pdev->dev, clk_data);
+		return -ENOMEM;
 	}
-	if (rc)
-		pr_err("dsi%dpll clock register failed, rc=%d\n", ndx, rc);
+	clk_data->clk_num = num_clks;
 
+	/* Establish client data */
+	if (ndx == 0) {
+		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_post_vco_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_post_bit_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_pclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi0pll_byteclk_mux.clkr.regmap = rmap;
+
+		for (i = VCO_CLK_0; i <= PCLK_MUX_0_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+							pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	} else {
+		rmap = devm_regmap_init(&pdev->dev, &post_vco_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_post_vco_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &post_bit_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_post_bit_div.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &bitclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_bitclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &pclk_src_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_pclk_src_mux.clkr.regmap = rmap;
+
+		rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
+				pll_res, &dsi_pll_10nm_config);
+		dsi1pll_byteclk_mux.clkr.regmap = rmap;
+
+		for (i = VCO_CLK_1; i <= PCLK_MUX_1_CLK; i++) {
+			clk = devm_clk_register(&pdev->dev,
+						mdss_dsi_pllcc_10nm[i]);
+			if (IS_ERR(clk)) {
+				pr_err("clk registration failed for DSI clock:%d\n",
+						pll_res->index);
+				rc = -EINVAL;
+				goto clk_register_fail;
+			}
+			clk_data->clks[i] = clk;
+		}
+
+		rc = of_clk_add_provider(pdev->dev.of_node,
+				of_clk_src_onecell_get, clk_data);
+	}
+	if (!rc) {
+		pr_info("Registered DSI PLL:%d clocks successfully\n", ndx);
+		return rc;
+	}
+clk_register_fail:
+	devm_kfree(&pdev->dev, clk_data->clks);
+	devm_kfree(&pdev->dev, clk_data);
 	return rc;
 }
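
The registration path above hands the mdss_dsi_pllcc_10nm table to the common
clock framework through of_clk_add_provider()/of_clk_src_onecell_get, so
device tree consumers resolve these clocks by index. A minimal consumer
sketch, assuming a DT node that references one of these clocks under
clock-names (the "byte_clk" name and the function are illustrative, not part
of this patch):

static int sketch_get_dsi_byte_clk(struct platform_device *pdev)
{
	struct clk *byte_clk;

	/* resolves through of_clk_src_onecell_get() into clk_data->clks[] */
	byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
	if (IS_ERR(byte_clk))
		return PTR_ERR(byte_clk);

	return clk_prepare_enable(byte_clk);
}
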
diff --git a/drivers/clk/qcom/mdss/mdss-dsi-pll.h b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
index 286c99e..7fc38a2 100644
--- a/drivers/clk/qcom/mdss/mdss-dsi-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-dsi-pll.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -13,6 +13,8 @@
 #ifndef __MDSS_DSI_PLL_H
 #define __MDSS_DSI_PLL_H
 
+#include <linux/clk-provider.h>
+#include "mdss-pll.h"
 #define MAX_DSI_PLL_EN_SEQS	10
 
 #define DSI_PHY_PLL_UNIPHY_PLL_GLB_CFG		(0x0020)
@@ -31,6 +33,7 @@
 };
 
 struct dsi_pll_vco_clk {
+	struct clk_hw	hw;
 	unsigned long	ref_clk_rate;
 	unsigned long	min_rate;
 	unsigned long	max_rate;
@@ -38,73 +41,16 @@
 	struct lpfr_cfg *lpfr_lut;
 	u32		lpfr_lut_size;
 	void		*priv;
-
-	struct clk	c;
-
 	int (*pll_enable_seqs[MAX_DSI_PLL_EN_SEQS])
 			(struct mdss_pll_resources *dsi_pll_Res);
 };
 
-static inline struct dsi_pll_vco_clk *to_vco_clk(struct clk *clk)
+int dsi_pll_clock_register_10nm(struct platform_device *pdev,
+				struct mdss_pll_resources *pll_res);
+
+static inline struct dsi_pll_vco_clk *to_vco_clk_hw(struct clk_hw *hw)
 {
-	return container_of(clk, struct dsi_pll_vco_clk, c);
+	return container_of(hw, struct dsi_pll_vco_clk, hw);
 }
 
-int dsi_pll_clock_register_hpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_20nm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_lpm(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8996(struct platform_device *pdev,
-				struct mdss_pll_resources *pll_res);
-int dsi_pll_clock_register_8998(struct platform_device *pdev,
-				  struct mdss_pll_resources *pll_res);
-
-int set_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_byte_mux_sel(struct mux_clk *clk);
-int dsi_pll_mux_prepare(struct clk *c);
-int fixed_4div_set_div(struct div_clk *clk, int div);
-int fixed_4div_get_div(struct div_clk *clk);
-int digital_set_div(struct div_clk *clk, int div);
-int digital_get_div(struct div_clk *clk);
-int analog_set_div(struct div_clk *clk, int div);
-int analog_get_div(struct div_clk *clk);
-int dsi_pll_lock_status(struct mdss_pll_resources *dsi_pll_res);
-int vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-unsigned long vco_get_rate(struct clk *c);
-long vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff vco_handoff(struct clk *c);
-int vco_prepare(struct clk *c);
-void vco_unprepare(struct clk *c);
-
-/* APIs for 20nm PHY PLL */
-int pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco, unsigned long rate);
-int shadow_pll_20nm_vco_set_rate(struct dsi_pll_vco_clk *vco,
-				unsigned long rate);
-long pll_20nm_vco_round_rate(struct clk *c, unsigned long rate);
-enum handoff pll_20nm_vco_handoff(struct clk *c);
-int pll_20nm_vco_prepare(struct clk *c);
-void pll_20nm_vco_unprepare(struct clk *c);
-int pll_20nm_vco_enable_seq(struct mdss_pll_resources *dsi_pll_res);
-
-int set_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int set_shadow_bypass_lp_div_mux_sel(struct mux_clk *clk, int sel);
-int get_bypass_lp_div_mux_sel(struct mux_clk *clk);
-int fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int shadow_fixed_hr_oclk2_set_div(struct div_clk *clk, int div);
-int fixed_hr_oclk2_get_div(struct div_clk *clk);
-int hr_oclk3_set_div(struct div_clk *clk, int div);
-int shadow_hr_oclk3_set_div(struct div_clk *clk, int div);
-int hr_oclk3_get_div(struct div_clk *clk);
-int ndiv_set_div(struct div_clk *clk, int div);
-int shadow_ndiv_set_div(struct div_clk *clk, int div);
-int ndiv_get_div(struct div_clk *clk);
-void __dsi_pll_disable(void __iomem *pll_base);
-
-int set_mdss_pixel_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_pixel_mux_sel(struct mux_clk *clk);
-int set_mdss_byte_mux_sel(struct mux_clk *clk, int sel);
-int get_mdss_byte_mux_sel(struct mux_clk *clk);
-
 #endif
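
The to_vco_clk_hw() helper follows the common-clock idiom: the framework
hands callbacks a bare struct clk_hw, and container_of() recovers the
driver's wrapper. A hedged sketch of a round_rate callback using it (the
function name is illustrative; the real ops live in the 10nm PLL file):

static long sketch_vco_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	struct dsi_pll_vco_clk *vco = to_vco_clk_hw(hw);

	/* constrain the request to the VCO operating range */
	if (rate < vco->min_rate)
		return vco->min_rate;
	if (rate > vco->max_rate)
		return vco->max_rate;
	return rate;
}
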
diff --git a/drivers/clk/qcom/mdss/mdss-pll-util.c b/drivers/clk/qcom/mdss/mdss-pll-util.c
index 690c53f..4d79772 100644
--- a/drivers/clk/qcom/mdss/mdss-pll-util.c
+++ b/drivers/clk/qcom/mdss/mdss-pll-util.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -16,7 +16,6 @@
 #include <linux/kernel.h>
 #include <linux/err.h>
 #include <linux/string.h>
-#include <linux/clk/msm-clock-generic.h>
 #include <linux/of_address.h>
 #include <linux/dma-mapping.h>
 #include <linux/vmalloc.h>
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index c22fa80..0a0d303 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -19,12 +19,8 @@
 #include <linux/err.h>
 #include <linux/delay.h>
 #include <linux/iopoll.h>
-#include <linux/clk/msm-clock-generic.h>
-
 #include "mdss-pll.h"
 #include "mdss-dsi-pll.h"
-#include "mdss-hdmi-pll.h"
-#include "mdss-dp-pll.h"
 
 int mdss_pll_resource_enable(struct mdss_pll_resources *pll_res, bool enable)
 {
@@ -128,32 +124,10 @@
 		goto err;
 	}
 
-	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
-		pll_res->target_id = MDSS_PLL_TARGET_8996;
-		pll_res->revision = 1;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8996_v2")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8996;
-		pll_res->target_id = MDSS_PLL_TARGET_8996;
-		pll_res->revision = 2;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_DSI_PLL_8998;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_dp_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_DP_PLL_8998;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v2")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V2;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8996_v3")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3;
-	} else if (!strcmp(compatible_stream,
-				"qcom,mdss_hdmi_pll_8996_v3_1p8")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8996_V3_1_8;
-	} else if (!strcmp(compatible_stream, "qcom,mdss_hdmi_pll_8998")) {
-		pll_res->pll_interface_type = MDSS_HDMI_PLL_8998;
-	} else {
+	if (!strcmp(compatible_stream, "qcom,mdss_dsi_pll_10nm"))
+		pll_res->pll_interface_type = MDSS_DSI_PLL_10NM;
+	else
 		goto err;
-	}
 
 	return rc;
 
@@ -174,29 +148,8 @@
 	}
 
 	switch (pll_res->pll_interface_type) {
-	case MDSS_DSI_PLL_8996:
-		rc = dsi_pll_clock_register_8996(pdev, pll_res);
-		break;
-	case MDSS_DSI_PLL_8998:
-		rc = dsi_pll_clock_register_8998(pdev, pll_res);
-	case MDSS_DP_PLL_8998:
-		rc = dp_pll_clock_register_8998(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996:
-		rc = hdmi_8996_v1_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V2:
-		rc = hdmi_8996_v2_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V3:
-		rc = hdmi_8996_v3_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8996_V3_1_8:
-		rc = hdmi_8996_v3_1p8_pll_clock_register(pdev, pll_res);
-		break;
-	case MDSS_HDMI_PLL_8998:
-		rc = hdmi_8998_pll_clock_register(pdev, pll_res);
-		break;
+	case MDSS_DSI_PLL_10NM:
+		rc = dsi_pll_clock_register_10nm(pdev, pll_res);
+		break;
 	case MDSS_UNKNOWN_PLL:
 	default:
 		rc = -EINVAL;
@@ -392,15 +345,7 @@
 }
 
 static const struct of_device_id mdss_pll_dt_match[] = {
-	{.compatible = "qcom,mdss_dsi_pll_8996"},
-	{.compatible = "qcom,mdss_dsi_pll_8996_v2"},
-	{.compatible = "qcom,mdss_dsi_pll_8998"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v2"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v3"},
-	{.compatible = "qcom,mdss_hdmi_pll_8996_v3_1p8"},
-	{.compatible = "qcom,mdss_dp_pll_8998"},
-	{.compatible = "qcom,mdss_hdmi_pll_8998"},
+	{.compatible = "qcom,mdss_dsi_pll_10nm"},
 	{}
 };
 
diff --git a/drivers/clk/qcom/mdss/mdss-pll.h b/drivers/clk/qcom/mdss/mdss-pll.h
index 48dddf6..28b7ca6 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.h
+++ b/drivers/clk/qcom/mdss/mdss-pll.h
@@ -12,10 +12,16 @@
 
 #ifndef __MDSS_PLL_H
 #define __MDSS_PLL_H
-
-#include <linux/mdss_io_util.h>
-#include <linux/clk/msm-clock-generic.h>
+#include <linux/sde_io_util.h>
+#include <linux/clk-provider.h>
 #include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/clkdev.h>
+#include <linux/regmap.h>
+#include "../clk-regmap.h"
+#include "../clk-regmap-divider.h"
+#include "../clk-regmap-mux.h"
 
 #define MDSS_PLL_REG_W(base, offset, data)	\
 				writel_relaxed((data), (base) + (offset))
@@ -30,14 +36,7 @@
 			(base) + (offset))
 
 enum {
-	MDSS_DSI_PLL_8996,
-	MDSS_DSI_PLL_8998,
-	MDSS_DP_PLL_8998,
-	MDSS_HDMI_PLL_8996,
-	MDSS_HDMI_PLL_8996_V2,
-	MDSS_HDMI_PLL_8996_V3,
-	MDSS_HDMI_PLL_8996_V3_1_8,
-	MDSS_HDMI_PLL_8998,
+	MDSS_DSI_PLL_10NM,
 	MDSS_UNKNOWN_PLL,
 };
 
@@ -200,20 +199,24 @@
 		(!(readl_relaxed(pll_res->gdsc_base) & BIT(0)))) ? false : true;
 }
 
-static inline int mdss_pll_div_prepare(struct clk *c)
+static inline int mdss_pll_div_prepare(struct clk_hw *hw)
 {
-	struct div_clk *div = to_div_clk(c);
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
 	/* Restore the divider's value */
-	return div->ops->set_div(div, div->data.div);
+	return hw->init->ops->set_rate(hw, clk_hw_get_rate(hw),
+				clk_hw_get_rate(parent_hw));
 }
 
-static inline int mdss_set_mux_sel(struct mux_clk *clk, int sel)
+static inline int mdss_set_mux_sel(void *context, unsigned int reg,
+					unsigned int val)
 {
 	return 0;
 }
 
-static inline int mdss_get_mux_sel(struct mux_clk *clk)
+static inline int mdss_get_mux_sel(void *context, unsigned int reg,
+					unsigned int *val)
 {
+	*val = 0;
 	return 0;
 }
 
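
The two inline stubs above are regmap_bus accessors: mdss_set_mux_sel() and
mdss_get_mux_sel() back the mdss_mux_regmap_bus used by the 10nm registration
path, so mux selection is effectively a no-op until real register programming
is filled in. A sketch of how such a bus is attached to a clk_regmap
(dsi_pll_sketch_config is an assumed stand-in for the driver's
dsi_pll_10nm_config):

static const struct regmap_config dsi_pll_sketch_config = {
	.reg_bits = 32,
	.val_bits = 32,
};

static int sketch_attach_mux_regmap(struct platform_device *pdev,
				    struct mdss_pll_resources *pll_res,
				    struct clk_regmap *clkr)
{
	struct regmap *rmap;

	/* the context pointer (pll_res) is passed back to the bus callbacks */
	rmap = devm_regmap_init(&pdev->dev, &mdss_mux_regmap_bus,
				pll_res, &dsi_pll_sketch_config);
	if (IS_ERR(rmap))
		return PTR_ERR(rmap);

	clkr->regmap = rmap;
	return 0;
}
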
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4d2b81f..fd02eba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -385,6 +385,58 @@
 	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.
 
+config CRYPTO_DEV_QCE50
+	bool
+
+config FIPS_ENABLE
+	bool "FIPS140-2 compliant build"
+	default n
+	help
+	  This flag makes the current build FIPS140-2 compliant by
+	  enabling the code that performs the required checks.
+	  Select Y here to enable.
+
+config CRYPTO_DEV_QCRYPTO
+	tristate "QTI Crypto accelerator"
+	select CRYPTO_DES
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	default n
+	help
+	  This driver supports QTI crypto acceleration
+	  for kernel clients. To compile this driver as a module,
+	  choose M here: the module will be called qcrypto.
+
+config CRYPTO_DEV_QCOM_MSM_QCE
+	tristate "QTI Crypto Engine (QCE) module"
+	default n
+	help
+	  This driver supports QTI Crypto Engine 5.0.
+	  To compile this driver as a module, choose M here: the
+	  module will be called qce50.
+
+config CRYPTO_DEV_QCEDEV
+	tristate "QCEDEV Interface to CE module"
+	default n
+	help
+	  This driver supports QTI QCEDEV Crypto Engine 5.0.
+	  It exposes the interface to the QCE hardware accelerator
+	  via IOCTLs.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+	tristate "OTA Crypto module"
+	default n
+	help
+	  This driver supports QTI OTA Crypto in the FSM9xxx.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ota_crypto.
+
 config CRYPTO_DEV_NX
 	bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
 	depends on PPC64
@@ -555,4 +607,8 @@
 
 source "drivers/crypto/chelsio/Kconfig"
 
+if ARCH_QCOM
+source "drivers/crypto/msm/Kconfig"
+endif # ARCH_QCOM
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250f..5f7b988 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
diff --git a/drivers/crypto/msm/Kconfig b/drivers/crypto/msm/Kconfig
new file mode 100644
index 0000000..0f4568b
--- /dev/null
+++ b/drivers/crypto/msm/Kconfig
@@ -0,0 +1,10 @@
+config CRYPTO_DEV_QCOM_ICE
+	tristate "Inline Crypto Module"
+	default n
+	depends on PFK && BLK_DEV_DM
+	help
+	  This driver supports Inline Crypto Engine for QTI chipsets, MSM8994
+	  and later, to accelerate crypto operations for storage needs.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ice.
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
new file mode 100644
index 0000000..9ecb646
--- /dev/null
+++ b/drivers/crypto/msm/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce50.o
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
+obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += ice.o
diff --git a/drivers/crypto/msm/compat_qcedev.c b/drivers/crypto/msm/compat_qcedev.c
new file mode 100644
index 0000000..0ca28be
--- /dev/null
+++ b/drivers/crypto/msm/compat_qcedev.c
@@ -0,0 +1,431 @@
+/*
+ * QTI CE 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/qcedev.h>
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
+static int compat_get_qcedev_pmem_info(
+		struct compat_qcedev_pmem_info __user *pmem32,
+		struct qcedev_pmem_info __user *pmem)
+{
+	compat_ulong_t offset;
+	compat_int_t fd_src;
+	compat_int_t fd_dst;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	err |= get_user(fd_src, &pmem32->fd_src);
+	err |= put_user(fd_src, &pmem->fd_src);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem32->src[i].offset);
+		err |= put_user(offset, &pmem->src[i].offset);
+		err |= get_user(len, &pmem32->src[i].len);
+		err |= put_user(len, &pmem->src[i].len);
+	}
+
+	err |= get_user(fd_dst, &pmem32->fd_dst);
+	err |= put_user(fd_dst, &pmem->fd_dst);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem32->dst[i].offset);
+		err |= put_user(offset, &pmem->dst[i].offset);
+		err |= get_user(len, &pmem32->dst[i].len);
+		err |= put_user(len, &pmem->dst[i].len);
+	}
+
+	return err;
+}
+
+static int compat_put_qcedev_pmem_info(
+		struct compat_qcedev_pmem_info __user *pmem32,
+		struct qcedev_pmem_info __user *pmem)
+{
+	compat_ulong_t offset;
+	compat_int_t fd_src;
+	compat_int_t fd_dst;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	err |= get_user(fd_src, &pmem->fd_src);
+	err |= put_user(fd_src, &pmem32->fd_src);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem->src[i].offset);
+		err |= put_user(offset, &pmem32->src[i].offset);
+		err |= get_user(len, &pmem->src[i].len);
+		err |= put_user(len, &pmem32->src[i].len);
+	}
+
+	err |= get_user(fd_dst, &pmem->fd_dst);
+	err |= put_user(fd_dst, &pmem32->fd_dst);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem->dst[i].offset);
+		err |= put_user(offset, &pmem32->dst[i].offset);
+		err |= get_user(len, &pmem->dst[i].len);
+		err |= put_user(len, &pmem32->dst[i].len);
+	}
+
+	return err;
+}
+
+static int compat_get_qcedev_vbuf_info(
+		struct compat_qcedev_vbuf_info __user *vbuf32,
+		struct qcedev_vbuf_info __user *vbuf)
+{
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &vbuf32->src[i].vaddr);
+		vbuf->src[i].vaddr = NULL;
+		err |= put_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+		err |= get_user(len, &vbuf32->src[i].len);
+		err |= put_user(len, &vbuf->src[i].len);
+	}
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &vbuf32->dst[i].vaddr);
+		vbuf->dst[i].vaddr = NULL;
+		err |= put_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+		err |= get_user(len, &vbuf32->dst[i].len);
+		err |= put_user(len, &vbuf->dst[i].len);
+	}
+	return err;
+}
+
+static int compat_put_qcedev_vbuf_info(
+		struct compat_qcedev_vbuf_info __user *vbuf32,
+		struct qcedev_vbuf_info __user *vbuf)
+{
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+		vbuf32->src[i].vaddr = 0;
+		err |= put_user(vaddr, &vbuf32->src[i].vaddr);
+		err |= get_user(len, &vbuf->src[i].len);
+		err |= put_user(len, &vbuf32->src[i].len);
+	}
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+		vbuf32->dst[i].vaddr = 0;
+		err |= put_user(vaddr, &vbuf32->dst[i].vaddr);
+		err |= get_user(len, &vbuf->dst[i].len);
+		err |= put_user(len, &vbuf32->dst[i].len);
+	}
+	return err;
+}
+
+static int compat_get_qcedev_cipher_op_req(
+		struct compat_qcedev_cipher_op_req __user *data32,
+		struct qcedev_cipher_op_req __user *data)
+{
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_cipher_alg_enum alg;
+	compat_ulong_t byteoffset;
+	enum qcedev_oper_enum op;
+	compat_ulong_t data_len;
+	compat_ulong_t encklen;
+	compat_ulong_t entries;
+	compat_ulong_t ivlen;
+	uint8_t in_place_op;
+	int err = 0, i = 0;
+	uint8_t use_pmem;
+	uint8_t enckey;
+	uint8_t iv;
+
+	err |= get_user(use_pmem, &data32->use_pmem);
+	err |= put_user(use_pmem, &data->use_pmem);
+
+	if (use_pmem)
+		err |= compat_get_qcedev_pmem_info(&data32->pmem, &data->pmem);
+	else
+		err |= compat_get_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+	err |= get_user(entries, &data32->entries);
+	err |= put_user(entries, &data->entries);
+	err |= get_user(data_len, &data32->data_len);
+	err |= put_user(data_len, &data->data_len);
+	err |= get_user(in_place_op, &data32->in_place_op);
+	err |= put_user(in_place_op, &data->in_place_op);
+
+	for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+		err |= get_user(enckey, &(data32->enckey[i]));
+		err |= put_user(enckey, &(data->enckey[i]));
+	}
+
+	err |= get_user(encklen, &data32->encklen);
+	err |= put_user(encklen, &data->encklen);
+
+	for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+		err |= get_user(iv, &(data32->iv[i]));
+		err |= put_user(iv, &(data->iv[i]));
+	}
+
+	err |= get_user(ivlen, &data32->ivlen);
+	err |= put_user(ivlen, &data->ivlen);
+	err |= get_user(byteoffset, &data32->byteoffset);
+	err |= put_user(byteoffset, &data->byteoffset);
+	err |= get_user(alg, &data32->alg);
+	err |= put_user(alg, &data->alg);
+	err |= get_user(mode, &data32->mode);
+	err |= put_user(mode, &data->mode);
+	err |= get_user(op, &data32->op);
+	err |= put_user(op, &data->op);
+
+	return err;
+}
+
+static int compat_put_qcedev_cipher_op_req(
+		struct compat_qcedev_cipher_op_req __user *data32,
+		struct qcedev_cipher_op_req __user *data)
+{
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_cipher_alg_enum alg;
+	compat_ulong_t byteoffset;
+	enum qcedev_oper_enum op;
+	compat_ulong_t data_len;
+	compat_ulong_t encklen;
+	compat_ulong_t entries;
+	compat_ulong_t ivlen;
+	uint8_t in_place_op;
+	int err = 0, i = 0;
+	uint8_t use_pmem;
+	uint8_t enckey;
+	uint8_t iv;
+
+	err |= get_user(use_pmem, &data->use_pmem);
+	err |= put_user(use_pmem, &data32->use_pmem);
+
+	if (use_pmem)
+		err |= compat_put_qcedev_pmem_info(&data32->pmem, &data->pmem);
+	else
+		err |= compat_put_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+	err |= get_user(entries, &data->entries);
+	err |= put_user(entries, &data32->entries);
+	err |= get_user(data_len, &data->data_len);
+	err |= put_user(data_len, &data32->data_len);
+	err |= get_user(in_place_op, &data->in_place_op);
+	err |= put_user(in_place_op, &data32->in_place_op);
+
+	for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+		err |= get_user(enckey, &(data->enckey[i]));
+		err |= put_user(enckey, &(data32->enckey[i]));
+	}
+
+	err |= get_user(encklen, &data->encklen);
+	err |= put_user(encklen, &data32->encklen);
+
+	for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+		err |= get_user(iv, &(data->iv[i]));
+		err |= put_user(iv, &(data32->iv[i]));
+	}
+
+	err |= get_user(ivlen, &data->ivlen);
+	err |= put_user(ivlen, &data32->ivlen);
+	err |= get_user(byteoffset, &data->byteoffset);
+	err |= put_user(byteoffset, &data32->byteoffset);
+	err |= get_user(alg, &data->alg);
+	err |= put_user(alg, &data32->alg);
+	err |= get_user(mode, &data->mode);
+	err |= put_user(mode, &data32->mode);
+	err |= get_user(op, &data->op);
+	err |= put_user(op, &data32->op);
+
+	return err;
+}
+
+static int compat_get_qcedev_sha_op_req(
+		struct compat_qcedev_sha_op_req __user *data32,
+		struct qcedev_sha_op_req __user *data)
+{
+	enum qcedev_sha_alg_enum alg;
+	compat_ulong_t authklen;
+	compat_ulong_t data_len;
+	compat_ulong_t entries;
+	compat_ulong_t diglen;
+	compat_uptr_t authkey;
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint8_t digest;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &data32->data[i].vaddr);
+		data->data[i].vaddr = 0;
+		err |= put_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+		err |= get_user(len, &data32->data[i].len);
+		err |= put_user(len, &data->data[i].len);
+	}
+
+	err |= get_user(entries, &data32->entries);
+	err |= put_user(entries, &data->entries);
+	err |= get_user(data_len, &data32->data_len);
+	err |= put_user(data_len, &data->data_len);
+
+	for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+		err |= get_user(digest, &(data32->digest[i]));
+		err |= put_user(digest, &(data->digest[i]));
+	}
+
+	err |= get_user(diglen, &data32->diglen);
+	err |= put_user(diglen, &data->diglen);
+	err |= get_user(authkey, &data32->authkey);
+	data->authkey = NULL;
+	err |= put_user(authkey, (compat_uptr_t *)&data->authkey);
+	err |= get_user(authklen, &data32->authklen);
+	err |= put_user(authklen, &data->authklen);
+	err |= get_user(alg, &data32->alg);
+	err |= put_user(alg, &data->alg);
+
+	return err;
+}
+
+static int compat_put_qcedev_sha_op_req(
+		struct compat_qcedev_sha_op_req __user *data32,
+		struct qcedev_sha_op_req __user *data)
+{
+	enum qcedev_sha_alg_enum alg;
+	compat_ulong_t authklen;
+	compat_ulong_t data_len;
+	compat_ulong_t entries;
+	compat_ulong_t diglen;
+	compat_uptr_t authkey;
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint8_t digest;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+		data32->data[i].vaddr = 0;
+		err |= put_user(vaddr, &data32->data[i].vaddr);
+		err |= get_user(len, &data->data[i].len);
+		err |= put_user(len, &data32->data[i].len);
+	}
+
+	err |= get_user(entries, &data->entries);
+	err |= put_user(entries, &data32->entries);
+	err |= get_user(data_len, &data->data_len);
+	err |= put_user(data_len, &data32->data_len);
+
+	for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+		err |= get_user(digest, &(data->digest[i]));
+		err |= put_user(digest, &(data32->digest[i]));
+	}
+
+	err |= get_user(diglen, &data->diglen);
+	err |= put_user(diglen, &data32->diglen);
+	err |= get_user(authkey, (compat_uptr_t *)&data->authkey);
+	data32->authkey = 0;
+	err |= put_user(authkey, &data32->authkey);
+	err |= get_user(authklen, &data->authklen);
+	err |= put_user(authklen, &data32->authklen);
+	err |= get_user(alg, &data->alg);
+	err |= put_user(alg, &data32->alg);
+
+	return err;
+}
+
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+		return QCEDEV_IOCTL_ENC_REQ;
+	case COMPAT_QCEDEV_IOCTL_DEC_REQ:
+		return QCEDEV_IOCTL_DEC_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+		return QCEDEV_IOCTL_SHA_INIT_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		return QCEDEV_IOCTL_SHA_UPDATE_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+		return QCEDEV_IOCTL_SHA_FINAL_REQ;
+	case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ:
+		return QCEDEV_IOCTL_GET_SHA_REQ;
+	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+		return QCEDEV_IOCTL_GET_CMAC_REQ;
+	default:
+		return cmd;
+	}
+}
+
+long compat_qcedev_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	switch (cmd) {
+	case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+	case COMPAT_QCEDEV_IOCTL_DEC_REQ: {
+		struct compat_qcedev_cipher_op_req __user *data32;
+		struct qcedev_cipher_op_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_qcedev_cipher_op_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+		err = compat_put_qcedev_cipher_op_req(data32, data);
+		return ret ? ret : err;
+	}
+	case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+	case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+	case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+	case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: {
+		struct compat_qcedev_sha_op_req __user *data32;
+		struct qcedev_sha_op_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_qcedev_sha_op_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+		err = compat_put_qcedev_sha_op_req(data32, data);
+		return ret ? ret : err;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
+EXPORT_SYMBOL(compat_qcedev_ioctl);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI 32-64 Compatibility for Crypto driver");
diff --git a/drivers/crypto/msm/compat_qcedev.h b/drivers/crypto/msm/compat_qcedev.h
new file mode 100644
index 0000000..4cc3933
--- /dev/null
+++ b/drivers/crypto/msm/compat_qcedev.h
@@ -0,0 +1,165 @@
+#ifndef _UAPI_COMPAT_QCEDEV__H
+#define _UAPI_COMPAT_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/**
+ * struct compat_buf_info - Buffer information
+ * @offset:			Offset from the base address of the buffer
+ *				(Used when buffer is allocated using PMEM)
+ * @vaddr:			Virtual buffer address pointer
+ * @len:				Size of the buffer
+ */
+struct	compat_buf_info {
+	union {
+		compat_ulong_t	offset;
+		compat_uptr_t	vaddr;
+	};
+	compat_ulong_t	len;
+};
+
+/**
+ * struct compat_qcedev_vbuf_info - Source and destination Buffer information
+ * @src:				Array of buf_info for input/source
+ * @dst:				Array of buf_info for output/destination
+ */
+struct	compat_qcedev_vbuf_info {
+	struct compat_buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct compat_buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct compat_qcedev_pmem_info - Stores PMEM buffer information
+ * @fd_src:			Handle to /dev/adsp_pmem used to allocate
+ *				memory for input/src buffer
+ * @src:				Array of buf_info for input/source
+ * @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+ *				memory for output/dst buffer
+ * @dst:				Array of buf_info for output/destination
+ */
+struct	compat_qcedev_pmem_info {
+	compat_int_t		fd_src;
+	struct compat_buf_info	src[QCEDEV_MAX_BUFFERS];
+	compat_int_t		fd_dst;
+	struct compat_buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct compat_qcedev_cipher_op_req - Holds the ciphering request information
+ * @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+ *			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+ * @pmem (IN):		Stores PMEM buffer information.
+ *			Refer struct qcedev_pmem_info
+ * @vbuf (IN/OUT):	Stores Source and destination Buffer information
+ *			Refer to struct qcedev_vbuf_info
+ * @data_len (IN):	Total Length of input/src and output/dst in bytes
+ * @in_place_op (IN):	Indicates whether the operation is in place where
+ *			source == destination
+ *			When using PMEM allocated memory, must set this to 1
+ * @enckey (IN):		128 bits of confidentiality key
+ *			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+ *			enckey[15] bit 7-0
+ * @encklen (IN):	Length of the encryption key (set to 128 bits/16
+ *			bytes in the driver)
+ * @iv (IN/OUT):		Initialization vector data
+ *			This is updated by the driver, incremented by
+ *			number of blocks encrypted/decrypted.
+ * @ivlen (IN):		Length of the IV
+ * @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+ *			for AES-128 CTR mode only)
+ * @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+ * @mode (IN):		Mode use when using AES algorithm: ECB/CBC/CTR
+ *			Applicable when using AES algorithm only
+ * @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+ *			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+ *
+ * If use_pmem is set to 0, the driver assumes that memory was not allocated
+ * via PMEM; the kernel will allocate memory, copy data in from the user
+ * space buffers (data_src/data_dst), process it, and copy the result back
+ * to the user space buffer.
+ *
+ * If use_pmem is set to 1, the driver assumes that memory was allocated via
+ * PMEM.
+ * The kernel driver will use the fd_src to determine the kernel virtual address
+ * base that maps to the user space virtual address base for the  buffer
+ * allocated in user space.
+ * The final input/src and output/dst buffer pointer will be determined
+ * by adding the offsets to the kernel virtual addr.
+ *
+ * If use of hardware key is supported in the target, user can configure the
+ * key parameters (encklen, enckey) to use the hardware key.
+ * In order to use the hardware key, set encklen to 0 and set the enckey
+ * data array to 0.
+ */
+struct	compat_qcedev_cipher_op_req {
+	uint8_t					use_pmem;
+	union {
+		struct compat_qcedev_pmem_info	pmem;
+		struct compat_qcedev_vbuf_info	vbuf;
+	};
+	compat_ulong_t				entries;
+	compat_ulong_t				data_len;
+	uint8_t					in_place_op;
+	uint8_t					enckey[QCEDEV_MAX_KEY_SIZE];
+	compat_ulong_t				encklen;
+	uint8_t					iv[QCEDEV_MAX_IV_SIZE];
+	compat_ulong_t				ivlen;
+	compat_ulong_t				byteoffset;
+	enum qcedev_cipher_alg_enum		alg;
+	enum qcedev_cipher_mode_enum		mode;
+	enum qcedev_oper_enum			op;
+};
+
+/**
+ * struct compat_qcedev_sha_op_req - Holds the hashing request information
+ * @data (IN):			Array of pointers to the data to be hashed
+ * @entries (IN):		Number of buf_info entries in the data array
+ * @data_len (IN):		Length of data to be hashed
+ * @digest (IN/OUT):		Returns the hashed data information
+ * @diglen (OUT):		Size of the hashed/digest data
+ * @authkey (IN):		Pointer to authentication key for HMAC
+ * @authklen (IN):		Size of the authentication key
+ * @alg (IN):			Secure Hash algorithm
+ */
+struct	compat_qcedev_sha_op_req {
+	struct compat_buf_info			data[QCEDEV_MAX_BUFFERS];
+	compat_ulong_t				entries;
+	compat_ulong_t				data_len;
+	uint8_t					digest[QCEDEV_MAX_SHA_DIGEST];
+	compat_ulong_t				diglen;
+	compat_uptr_t				authkey;
+	compat_ulong_t				authklen;
+	enum qcedev_sha_alg_enum		alg;
+};
+
+struct file;
+extern long compat_qcedev_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define COMPAT_QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _UAPI_COMPAT_QCEDEV__H */
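
From user space, nothing compat-specific is needed: a 32-bit binary issues
the ordinary QCEDEV ioctls and the structure-layout translation happens in
the kernel shim above. A hedged sketch (the /dev/qce node name is an
assumption, not confirmed by this patch):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/qcedev.h>

int qce_encrypt(struct qcedev_cipher_op_req *req)
{
	int fd = open("/dev/qce", O_RDWR);	/* assumed device node */

	if (fd < 0)
		return -1;
	if (ioctl(fd, QCEDEV_IOCTL_ENC_REQ, req) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
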
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
new file mode 100644
index 0000000..ba6825e
--- /dev/null
+++ b/drivers/crypto/msm/ice.c
@@ -0,0 +1,1761 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/device-mapper.h>
+#include <linux/clk.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus.h>
+#include <linux/pfk.h>
+#include <crypto/ice.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+#include "iceregs.h"
+
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
+#define TZ_OWNER_QSEE_OS                 50
+#define TZ_SVC_KEYSTORE                  5     /* Keystore management */
+
+#define TZ_OS_KS_RESTORE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+
+#define TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
+
+#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+
+#define ICE_REV(x, y) (((x) & ICE_CORE_##y##_REV_MASK) >> ICE_CORE_##y##_REV)
+#define QCOM_UFS_ICE_DEV	"iceufs"
+#define QCOM_SDCC_ICE_DEV	"icesdcc"
+#define QCOM_ICE_TYPE_NAME_LEN 8
+#define QCOM_ICE_MAX_BIST_CHECK_COUNT 100
+#define QCOM_ICE_UFS		10
+#define QCOM_ICE_SDCC		20
+
+struct ice_clk_info {
+	struct list_head list;
+	struct clk *clk;
+	const char *name;
+	u32 max_freq;
+	u32 min_freq;
+	u32 curr_freq;
+	bool enabled;
+};
+
+struct qcom_ice_bus_vote {
+	uint32_t client_handle;
+	uint32_t curr_vote;
+	int min_bw_vote;
+	int max_bw_vote;
+	int saved_vote;
+	bool is_max_bw_needed;
+	struct device_attribute max_bus_bw;
+};
+
+static LIST_HEAD(ice_devices);
+
+/*
+ * ICE HW device structure.
+ */
+struct ice_device {
+	struct list_head	list;
+	struct device		*pdev;
+	struct cdev		cdev;
+	dev_t			device_no;
+	struct class		*driver_class;
+	void __iomem		*mmio;
+	struct resource		*res;
+	int			irq;
+	bool			is_ice_enabled;
+	bool			is_ice_disable_fuse_blown;
+	ice_error_cb		error_cb;
+	void			*host_controller_data; /* UFS/eMMC/other host data */
+	struct list_head	clk_list_head;
+	u32			ice_hw_version;
+	bool			is_ice_clk_available;
+	char			ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
+	struct regulator	*reg;
+	bool			is_regulator_available;
+	struct qcom_ice_bus_vote bus_vote;
+	ktime_t			ice_reset_start_time;
+	ktime_t			ice_reset_complete_time;
+};
+
+static int qti_ice_setting_config(struct request *req,
+		struct platform_device *pdev,
+		struct ice_crypto_setting *crypto_data,
+		struct ice_data_setting *setting)
+{
+	struct ice_device *ice_dev = NULL;
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev) {
+		pr_debug("%s no ICE device\n", __func__);
+
+		/* make the caller finish peacefully */
+		return 0;
+	}
+
+	if (ice_dev->is_ice_disable_fuse_blown) {
+		pr_err("%s ICE disabled fuse is blown\n", __func__);
+		return -EPERM;
+	}
+
+	if ((short)(crypto_data->key_index) >= 0) {
+
+		memcpy(&setting->crypto_data, crypto_data,
+				sizeof(setting->crypto_data));
+
+		if (rq_data_dir(req) == WRITE)
+			setting->encr_bypass = false;
+		else if (rq_data_dir(req) == READ)
+			setting->decr_bypass = false;
+		else {
+			/* neither read nor write: keep both directions bypassed */
+			setting->encr_bypass = true;
+			setting->decr_bypass = true;
+		}
+	}
+
+	return 0;
+}
+
+static int qcom_ice_enable_clocks(struct ice_device *ice_dev, bool enable);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+
+static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
+{
+	int err = 0;
+
+	if (vote != ice_dev->bus_vote.curr_vote) {
+		err = msm_bus_scale_client_update_request(
+				ice_dev->bus_vote.client_handle, vote);
+		if (err) {
+			dev_err(ice_dev->pdev,
+				"%s:failed:client_handle=0x%x, vote=%d, err=%d\n",
+				__func__, ice_dev->bus_vote.client_handle,
+				vote, err);
+			goto out;
+		}
+		ice_dev->bus_vote.curr_vote = vote;
+	}
+out:
+	return err;
+}
+
+static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
+		const char *speed_mode)
+{
+	struct device *dev = ice_dev->pdev;
+	struct device_node *np = dev->of_node;
+	int err;
+	const char *key = "qcom,bus-vector-names";
+
+	if (!speed_mode) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (ice_dev->bus_vote.is_max_bw_needed && !!strcmp(speed_mode, "MIN"))
+		err = of_property_match_string(np, key, "MAX");
+	else
+		err = of_property_match_string(np, key, speed_mode);
+out:
+	if (err < 0)
+		dev_err(dev, "%s: Invalid %s mode %d\n",
+				__func__, speed_mode, err);
+	return err;
+}
+
+static int qcom_ice_bus_register(struct ice_device *ice_dev)
+{
+	int err = 0;
+	struct msm_bus_scale_pdata *bus_pdata;
+	struct device *dev = ice_dev->pdev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct device_node *np = dev->of_node;
+
+	bus_pdata = msm_bus_cl_get_pdata(pdev);
+	if (!bus_pdata) {
+		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
+		err = -ENODATA;
+		goto out;
+	}
+
+	err = of_property_count_strings(np, "qcom,bus-vector-names");
+	if (err < 0 || err != bus_pdata->num_usecases) {
+		dev_err(dev, "%s: Error = %d with qcom,bus-vector-names\n",
+				__func__, err);
+		goto out;
+	}
+	err = 0;
+
+	ice_dev->bus_vote.client_handle =
+			msm_bus_scale_register_client(bus_pdata);
+	if (!ice_dev->bus_vote.client_handle) {
+		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
+				__func__);
+		err = -EFAULT;
+		goto out;
+	}
+
+	/* cache the vote index for minimum and maximum bandwidth */
+	ice_dev->bus_vote.min_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+	ice_dev->bus_vote.max_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+out:
+	return err;
+}
+
+#else
+
+static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
+{
+	return 0;
+}
+
+static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
+		const char *speed_mode)
+{
+	return 0;
+}
+
+static int qcom_ice_bus_register(struct ice_device *ice_dev)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_BUS_SCALING */
+
+static int qcom_ice_get_vreg(struct ice_device *ice_dev)
+{
+	int ret = 0;
+
+	if (!ice_dev->is_regulator_available)
+		return 0;
+
+	if (ice_dev->reg)
+		return 0;
+
+	ice_dev->reg = devm_regulator_get(ice_dev->pdev, "vdd-hba");
+	if (IS_ERR(ice_dev->reg)) {
+		ret = PTR_ERR(ice_dev->reg);
+		dev_err(ice_dev->pdev, "%s: %s get failed, err=%d\n",
+			__func__, "vdd-hba-supply", ret);
+	}
+	return ret;
+}
+
+static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev)
+{
+	u32 regval;
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 &&
+	    ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 &&
+	    ICE_REV(ice_dev->ice_hw_version, STEP) == 0) {
+		regval = qcom_ice_readl(ice_dev,
+				QCOM_ICE_REGS_ADVANCED_CONTROL);
+		regval |= 0x800;
+		qcom_ice_writel(ice_dev, regval,
+				QCOM_ICE_REGS_ADVANCED_CONTROL);
+		/* Ensure register is updated */
+		mb();
+	}
+}
+
+static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev)
+{
+	u32 regval;
+
+	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	/*
+	 * Enable low power mode sequence
+	 * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+	 */
+	regval |= 0x7000;
+	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static void qcom_ice_enable_test_bus_config(struct ice_device *ice_dev)
+{
+	/*
+	 * Configure & enable ICE_TEST_BUS_REG to reflect ICE intr lines
+	 * MAIN_TEST_BUS_SELECTOR = 0 (ICE_CONFIG)
+	 * TEST_BUS_REG_EN = 1 (ENABLE)
+	 */
+	u32 regval;
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		return;
+
+	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+	regval &= 0x0FFFFFFF;
+	/* TBD: replace 0x2 with define in iceregs.h */
+	regval |= 0x2;
+	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static void qcom_ice_optimization_enable(struct ice_device *ice_dev)
+{
+	u32 regval;
+
+	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		regval |= 0xD807100;
+	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+		regval |= 0x3F007100;
+
+	/* ICE Optimizations Enable Sequence */
+	udelay(5);
+	/* [0]-0, [1]-0, [2]-8, [3]-E, [4]-0, [5]-0, [6]-F, [7]-A */
+	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	/* ICE HPG requires sleep before writing */
+	udelay(5);
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+		regval = 0;
+		regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP);
+		regval |= 0xF;
+		qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ENDIAN_SWAP);
+		/*
+		 * Ensure previous instructions were completed before issuing
+		 * the next ICE commands
+		 */
+		mb();
+	}
+}
+
+static int qcom_ice_wait_bist_status(struct ice_device *ice_dev)
+{
+	int count;
+	u32 reg;
+
+	/* Poll until all BIST bits are reset */
+	for (count = 0; count < QCOM_ICE_MAX_BIST_CHECK_COUNT; count++) {
+		reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS);
+		if (!(reg & ICE_BIST_STATUS_MASK))
+			break;
+		udelay(50);
+	}
+
+	if (reg & ICE_BIST_STATUS_MASK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int qcom_ice_enable(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+	int ret = 0;
+
+	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1)))
+		ret = qcom_ice_wait_bist_status(ice_dev);
+	if (ret) {
+		dev_err(ice_dev->pdev, "BIST status error (%d)\n", ret);
+		return ret;
+	}
+
+	/* Starting ICE v3 enabling is done at storage controller (UFS/SDCC) */
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 3)
+		return 0;
+
+	/*
+	 * To enable ICE, perform following
+	 * 1. Set IGNORE_CONTROLLER_RESET to USE in ICE_RESET register
+	 * 2. Disable GLOBAL_BYPASS bit in ICE_CONTROL register
+	 */
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET);
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		reg &= 0x0;
+	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+		reg &= ~0x100;
+
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_RESET);
+
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL);
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		reg &= 0xFFFE;
+	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+		reg &= ~0x7;
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_CONTROL);
+
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+		reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
+		if ((reg & 0x80000000) != 0x0) {
+			pr_err("%s: Bypass failed for ice = %p",
+				__func__, (void *)ice_dev);
+			WARN_ON(1);
+		}
+	}
+	return 0;
+}
+
+static int qcom_ice_verify_ice(struct ice_device *ice_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+
+	rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION);
+	maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
+	min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
+	step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
+
+	if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) {
+		pr_err("%s: Unknown QC ICE device at %lu, rev %d.%d.%d\n",
+			__func__, (unsigned long)ice_dev->mmio,
+			maj_rev, min_rev, step_rev);
+		return -ENODEV;
+	}
+	ice_dev->ice_hw_version = rev;
+
+	dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n",
+					maj_rev, min_rev, step_rev,
+					ice_dev->mmio);
+
+	return 0;
+}
+
+static void qcom_ice_enable_intr(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+
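+	/* Clearing a bit in the mask register unmasks (enables) that source */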
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK;
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static void qcom_ice_disable_intr(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	reg |= QCOM_ICE_NON_SEC_IRQ_MASK;
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static irqreturn_t qcom_ice_isr(int isr, void *data)
+{
+	irqreturn_t retval = IRQ_NONE;
+	u32 status;
+	struct ice_device *ice_dev = data;
+
+	status = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS);
+	if (status) {
+		ice_dev->error_cb(ice_dev->host_controller_data, status);
+
+		/* Interrupt has been handled. Clear the IRQ */
+		qcom_ice_writel(ice_dev, status, QCOM_ICE_REGS_NON_SEC_IRQ_CLR);
+		/* Ensure instruction is completed */
+		mb();
+		retval = IRQ_HANDLED;
+	}
+	return retval;
+}
+
+static void qcom_ice_parse_ice_instance_type(struct platform_device *pdev,
+		struct ice_device *ice_dev)
+{
+	int ret = -1;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	const char *type;
+
+	ret = of_property_read_string_index(np, "qcom,instance-type", 0, &type);
+	if (ret) {
+		pr_err("%s: Could not get ICE instance type\n", __func__);
+		goto out;
+	}
+	strlcpy(ice_dev->ice_instance_type, type, QCOM_ICE_TYPE_NAME_LEN);
+out:
+	return;
+}
+
+static int qcom_ice_parse_clock_info(struct platform_device *pdev,
+		struct ice_device *ice_dev)
+{
+	int ret = -1, cnt, i, len;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	char *name;
+	struct ice_clk_info *clki;
+	u32 *clkfreq = NULL;
+
+	if (!np)
+		goto out;
+
+	cnt = of_property_count_strings(np, "clock-names");
+	if (cnt <= 0) {
+		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+				__func__);
+		ret = cnt;
+		goto out;
+	}
+
+	if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
+		dev_info(dev, "qcom,op-freq-hz property not specified\n");
+		goto out;
+	}
+
+	len = len/sizeof(*clkfreq);
+	if (len != cnt)
+		goto out;
+
+	clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
+	if (!clkfreq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
+	if (ret)
+		goto out;
+
+	INIT_LIST_HEAD(&ice_dev->clk_list_head);
+
+	for (i = 0; i < cnt; i++) {
+		ret = of_property_read_string_index(np,
+				"clock-names", i, (const char **)&name);
+		if (ret)
+			goto out;
+
+		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+		if (!clki) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		clki->max_freq = clkfreq[i];
+		clki->name = kstrdup(name, GFP_KERNEL);
+		list_add_tail(&clki->list, &ice_dev->clk_list_head);
+	}
+out:
+	if (clkfreq)
+		devm_kfree(dev, (void *)clkfreq);
+	return ret;
+}
+
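+/*
+ * Illustrative devicetree fragment for the clock properties parsed above
+ * (the clock name and rate are hypothetical; both lists must have the same
+ * number of entries):
+ *
+ *	clock-names = "ice_core_clk";
+ *	qcom,op-freq-hz = <300000000>;
+ */
+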
+static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
+		struct ice_device *ice_dev)
+{
+	struct device *dev = &pdev->dev;
+	int rc = -1;
+	int irq;
+
+	ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!ice_dev->res) {
+		pr_err("%s: No memory available for IORESOURCE\n", __func__);
+		return -ENOMEM;
+	}
+
+	ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res);
+	if (IS_ERR(ice_dev->mmio)) {
+		rc = PTR_ERR(ice_dev->mmio);
+		pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc);
+		goto out;
+	}
+
+	if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) {
+		pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n",
+								 __func__);
+		ice_dev->is_regulator_available = false;
+	} else {
+		ice_dev->is_regulator_available = true;
+	}
+	ice_dev->is_ice_clk_available = of_property_read_bool(
+						(&pdev->dev)->of_node,
+						"qcom,enable-ice-clk");
+
+	if (ice_dev->is_ice_clk_available) {
+		rc = qcom_ice_parse_clock_info(pdev, ice_dev);
+		if (rc) {
+			pr_err("%s: qcom_ice_parse_clock_info failed (%d)\n",
+				__func__, rc);
+			goto err_dev;
+		}
+	}
+
+	/* ICE interrupts are only relevant for v2.x */
+	irq = platform_get_irq(pdev, 0);
+	if (irq >= 0) {
+		rc = devm_request_irq(dev, irq, qcom_ice_isr, 0, dev_name(dev),
+				ice_dev);
+		if (rc) {
+			pr_err("%s: devm_request_irq irq=%d failed (%d)\n",
+				__func__, irq, rc);
+			goto err_dev;
+		}
+		ice_dev->irq = irq;
+		pr_info("ICE IRQ = %d\n", ice_dev->irq);
+	} else {
+		dev_dbg(dev, "IRQ resource not available\n");
+	}
+
+	qcom_ice_parse_ice_instance_type(pdev, ice_dev);
+
+	return 0;
+err_dev:
+	if (rc && ice_dev->mmio)
+		devm_iounmap(dev, ice_dev->mmio);
+out:
+	return rc;
+}
+
+/*
+ * An ICE HW instance can exist in UFS- or eMMC-based storage HW.
+ * Userspace does not know what kind of ICE it is dealing with; it can
+ * find out which storage device it is booting from, but not all storage
+ * types supported ICE from the beginning. So an ICE device node is
+ * created for userspace to probe whether ICE exists for that kind of
+ * storage.
+ */
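+
+/*
+ * A minimal userspace probe sketch (illustrative only; the node name
+ * under /dev depends on QCOM_SDCC_ICE_DEV/QCOM_UFS_ICE_DEV and on udev):
+ *
+ *	int fd = open("/dev/<ice-node>", O_RDONLY);
+ *	if (fd >= 0)
+ *		close(fd);	(a successful open means ICE is present)
+ */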
+static const struct file_operations qcom_ice_fops = {
+	.owner = THIS_MODULE,
+};
+
+static int register_ice_device(struct ice_device *ice_dev)
+{
+	int rc = 0;
+	unsigned int baseminor = 0;
+	unsigned int count = 1;
+	struct device *class_dev;
+	int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
+
+	rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		return rc;
+	}
+	ice_dev->driver_class = class_create(THIS_MODULE,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+	if (IS_ERR(ice_dev->driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_unreg_chrdev_region;
+	}
+	class_dev = device_create(ice_dev->driver_class, NULL,
+					ice_dev->device_no, NULL,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+
+	if (IS_ERR(class_dev)) {
+		rc = PTR_ERR(class_dev);
+		pr_err("device_create failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&ice_dev->cdev, &qcom_ice_fops);
+	ice_dev->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_destroy_device;
+	}
+	return 0;
+
+exit_destroy_device:
+	device_destroy(ice_dev->driver_class, ice_dev->device_no);
+
+exit_destroy_class:
+	class_destroy(ice_dev->driver_class);
+
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(ice_dev->device_no, 1);
+	return rc;
+}
+
+static int qcom_ice_probe(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid platform_device passed\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ice_dev = kzalloc(sizeof(struct ice_device), GFP_KERNEL);
+
+	if (!ice_dev) {
+		rc = -ENOMEM;
+		pr_err("%s: Error %d allocating memory for ICE device:\n",
+			__func__, rc);
+		goto out;
+	}
+
+	ice_dev->pdev = &pdev->dev;
+	if (!ice_dev->pdev) {
+		rc = -EINVAL;
+		pr_err("%s: Invalid device passed in platform_device\n",
+								__func__);
+		goto err_ice_dev;
+	}
+
+	if (pdev->dev.of_node)
+		rc = qcom_ice_get_device_tree_data(pdev, ice_dev);
+	else {
+		rc = -EINVAL;
+		pr_err("%s: ICE device node not found\n", __func__);
+	}
+
+	if (rc)
+		goto err_ice_dev;
+
+	pr_debug("%s: Registering ICE device\n", __func__);
+	rc = register_ice_device(ice_dev);
+	if (rc) {
+		pr_err("create character device failed.\n");
+		goto err_ice_dev;
+	}
+
+	/*
+	 * Enabling ICE here would be a waste of power. ICE is enabled
+	 * when the first request for a crypto operation arrives.
+	 */
+	ice_dev->is_ice_enabled = false;
+
+	platform_set_drvdata(pdev, ice_dev);
+	list_add_tail(&ice_dev->list, &ice_devices);
+
+	goto out;
+
+err_ice_dev:
+	kfree(ice_dev);
+out:
+	return rc;
+}
+
+static int qcom_ice_remove(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return 0;
+
+	qcom_ice_disable_intr(ice_dev);
+
+	device_init_wakeup(&pdev->dev, false);
+	if (ice_dev->mmio)
+		iounmap(ice_dev->mmio);
+
+	list_del_init(&ice_dev->list);
+	kfree(ice_dev);
+
+	return 0;
+}
+
+static int  qcom_ice_suspend(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int qcom_ice_restore_config(void)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	/*
+	 * TZ checks the KEYS_RAM_RESET_COMPLETED status bit before processing
+	 * the restore config command. This avoids two calls from HLOS to TZ:
+	 * one to check the KEYS_RAM_RESET_COMPLETED status bit and a second
+	 * to restore the config.
+	 */
+
+	desc.arginfo = TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID;
+
+	ret = scm_call2(TZ_OS_KS_RESTORE_KEY_ID, &desc);
+
+	if (ret)
+		pr_err("%s: Error: 0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
+{
+	struct scm_desc desc = {0};
+	int ret = -1;
+
+	/* For ICE v3, the key configuration needs to be restored after a reset */
+
+	desc.arginfo = TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID;
+
+	if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
+		desc.args[0] = QCOM_ICE_SDCC;
+
+	if (!strcmp(ice_dev->ice_instance_type, "ufs"))
+		desc.args[0] = QCOM_ICE_UFS;
+
+	ret = scm_call2(TZ_OS_KS_RESTORE_KEY_CONFIG_ID, &desc);
+
+	if (ret)
+		pr_err("%s: Error:  0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+static int qcom_ice_init_clocks(struct ice_device *ice)
+{
+	int ret = -EINVAL;
+	struct ice_clk_info *clki;
+	struct device *dev = ice->pdev;
+	struct list_head *head = &ice->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		clki->clk = devm_clk_get(dev, clki->name);
+		if (IS_ERR(clki->clk)) {
+			ret = PTR_ERR(clki->clk);
+			dev_err(dev, "%s: %s clk get failed, %d\n",
+					__func__, clki->name, ret);
+			goto out;
+		}
+
+		/* Not all clocks would have a rate to be set */
+		ret = 0;
+		if (clki->max_freq) {
+			ret = clk_set_rate(clki->clk, clki->max_freq);
+			if (ret) {
+				dev_err(dev,
+				"%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+				clki->max_freq, ret);
+				goto out;
+			}
+			clki->curr_freq = clki->max_freq;
+			dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+		}
+	}
+out:
+	return ret;
+}
+
+static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
+{
+	int ret = 0;
+	struct ice_clk_info *clki;
+	struct device *dev = ice->pdev;
+	struct list_head *head = &ice->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!ice->is_ice_clk_available) {
+		dev_err(dev, "%s:ICE Clock not available\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		if (enable)
+			ret = clk_prepare_enable(clki->clk);
+		else
+			clk_disable_unprepare(clki->clk);
+
+		if (ret) {
+			dev_err(dev, "Unable to %s ICE core clk\n",
+				enable ? "enable" : "disable");
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
+{
+	/* We need to enable source for ICE secure interrupts */
+	int ret = 0;
+	u32 regval;
+
+	regval = scm_io_read((unsigned long)ice_dev->res +
+			QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK);
+
+	regval &= ~QCOM_ICE_SEC_IRQ_MASK;
+	ret = scm_io_write((unsigned long)ice_dev->res +
+			QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval);
+
+	/*
+	 * Ensure previous instructions were completed before issuing next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	if (ret)
+		pr_err("%s: failed(0x%x) to init secure ICE config\n",
+								__func__, ret);
+	return ret;
+}
+
+static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
+{
+	int ret = 0, scm_ret = 0;
+
+	/* scm command buffer structure */
+	struct qcom_scm_cmd_buf {
+		unsigned int device_id;
+		unsigned int spare;
+	} cbuf = {0};
+
+	/*
+	 * Ideally, we should check the ICE version to decide whether to
+	 * proceed or not. Since the version won't be available when this
+	 * function is called, we depend on is_ice_clk_available to decide.
+	 */
+	if (ice_dev->is_ice_clk_available)
+		goto out;
+
+	/*
+	 * Store dev_id in ice_device structure so that emmc/ufs cases can be
+	 * handled properly
+	 */
+	#define RESTORE_SEC_CFG_CMD	0x2
+	#define ICE_TZ_DEV_ID	20
+
+	cbuf.device_id = ICE_TZ_DEV_ID;
+	ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
+	if (ret || scm_ret) {
+		pr_err("%s: failed, ret %d scm_ret %d\n",
+						__func__, ret, scm_ret);
+		if (!ret)
+			ret = scm_ret;
+	}
+out:
+
+	return ret;
+}
+
+static int qcom_ice_finish_init(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+	int err = 0;
+
+	if (!ice_dev) {
+		pr_err("%s: Null data received\n", __func__);
+		err = -ENODEV;
+		goto out;
+	}
+
+	if (ice_dev->is_ice_clk_available) {
+		err = qcom_ice_init_clocks(ice_dev);
+		if (err)
+			goto out;
+
+		err = qcom_ice_bus_register(ice_dev);
+		if (err)
+			goto out;
+	}
+
+	/*
+	 * It is possible that the ICE device is not yet probed when the host
+	 * is probed, causing the host probe to be deferred. A deferred host
+	 * probe can cause a power collapse for the host, which can wipe the
+	 * configurations of both host and ICE. It is prudent to restore the
+	 * config here.
+	 */
+	err = qcom_ice_update_sec_cfg(ice_dev);
+	if (err)
+		goto out;
+
+	err = qcom_ice_verify_ice(ice_dev);
+	if (err)
+		goto out;
+
+	/*
+	 * If the ICE_DISABLE fuse is blown, return immediately. The FORCE HW
+	 * key fuses are treated the same way, since there is no use case for
+	 * HW keys in either FDE or PFE.
+	 */
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING);
+	reg &= (ICE_FUSE_SETTING_MASK |
+		ICE_FORCE_HW_KEY0_SETTING_MASK |
+		ICE_FORCE_HW_KEY1_SETTING_MASK);
+
+	if (reg) {
+		ice_dev->is_ice_disable_fuse_blown = true;
+		pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
+								__func__);
+		err = -EPERM;
+		goto out;
+	}
+
+	/* TZ side of ICE driver would handle secure init of ICE HW from v2 */
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 &&
+		qcom_ice_secure_ice_init(ice_dev)) {
+		pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__);
+		err = -EFAULT;
+		goto out;
+	}
+
+	qcom_ice_low_power_mode_enable(ice_dev);
+	qcom_ice_optimization_enable(ice_dev);
+	qcom_ice_config_proc_ignore(ice_dev);
+	qcom_ice_enable_test_bus_config(ice_dev);
+	qcom_ice_enable(ice_dev);
+	ice_dev->is_ice_enabled = true;
+	qcom_ice_enable_intr(ice_dev);
+
+out:
+	return err;
+}
+
+static int qcom_ice_init(struct platform_device *pdev,
+			void *host_controller_data,
+			ice_error_cb error_cb)
+{
+	/*
+	 * A completion event for the host controller is triggered upon
+	 * initialization completion. Initialization leaves ICE in global
+	 * bypass mode; ICE is enabled for a particular request when that
+	 * data transfer request is received.
+	 */
+	struct ice_device *ice_dev;
+
+	ice_dev = platform_get_drvdata(pdev);
+	if (!ice_dev) {
+		pr_err("%s: invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev->error_cb = error_cb;
+	ice_dev->host_controller_data = host_controller_data;
+
+	return qcom_ice_finish_init(ice_dev);
+}
+
+static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
+{
+	int err = 0;
+
+	if (ice_dev->is_ice_disable_fuse_blown) {
+		err = -EPERM;
+		goto out;
+	}
+
+	if (ice_dev->is_ice_enabled) {
+		/*
+		 * ICE resets into global bypass mode with optimization and
+		 * low power mode disabled. Hence we need to redo those
+		 * sequences.
+		 */
+		qcom_ice_low_power_mode_enable(ice_dev);
+
+		qcom_ice_enable_test_bus_config(ice_dev);
+
+		qcom_ice_optimization_enable(ice_dev);
+		qcom_ice_enable(ice_dev);
+
+		if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+			/*
+			 * When ICE resets, it wipes all keys from its LUTs;
+			 * the ICE driver should call TZ to restore them.
+			 */
+			if (qcom_ice_restore_config()) {
+				err = -EFAULT;
+				goto out;
+			}
+		} else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
+			/*
+			 * ICE loses its key configuration when UFS is reset;
+			 * restore it.
+			 */
+			err = qcom_ice_restore_key_config(ice_dev);
+			if (err)
+				goto out;
+
+			/*
+			 * for PFE case, clear the cached ICE key table,
+			 * this will force keys to be reconfigured
+			 * per each next transaction
+			 */
+			pfk_clear_on_reset();
+		}
+	}
+
+	ice_dev->ice_reset_complete_time = ktime_get();
+out:
+	return err;
+}
+
+static int qcom_ice_resume(struct platform_device *pdev)
+{
+	/*
+	 * ICE is power collapsed when the storage controller is power
+	 * collapsed. The ICE resume function is responsible for the ICE HW
+	 * enabling sequence and for key restoration. A completion event
+	 * should be triggered upon resume completion; the storage driver is
+	 * fully operational only after receiving this event.
+	 */
+	struct ice_device *ice_dev;
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return -EINVAL;
+
+	if (ice_dev->is_ice_clk_available) {
+		/*
+		 * Storage is calling this function after power collapse which
+		 * would put ICE into GLOBAL_BYPASS mode. Make sure to enable
+		 * ICE
+		 */
+		qcom_ice_enable(ice_dev);
+	}
+
+	return 0;
+}
+
+static void qcom_ice_dump_test_bus(struct ice_device *ice_dev)
+{
+	u32 reg = 0x1;
+	u32 val;
+	u8 bus_selector;
+	u8 stream_selector;
+
+	pr_err("ICE TEST BUS DUMP:\n");
+
+	for (bus_selector = 0; bus_selector <= 0xF; bus_selector++) {
+		/* 0xD selects the stream datapath, dumped separately below */
+		if (bus_selector == 0xD)
+			continue;
+		reg = 0x1;	/* enable test bus */
+		reg |= bus_selector << 28;
+		qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		mb();
+		val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			reg, val);
+	}
+
+	pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
+	for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) {
+		reg = 0xD0000001;	/* enable stream test bus */
+		reg |= stream_selector << 16;
+		qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		mb();
+		val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			reg, val);
+	}
+}
+
+static void qcom_ice_debug(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	if (!pdev) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		goto out;
+	}
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev) {
+		pr_err("%s: No ICE device available\n", __func__);
+		goto out;
+	}
+
+	if (!ice_dev->is_ice_enabled) {
+		pr_err("%s: ICE device is not enabled\n", __func__);
+		goto out;
+	}
+
+	pr_err("%s: =========== REGISTER DUMP (%p)===========\n",
+			ice_dev->ice_instance_type, ice_dev);
+
+	pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET));
+
+	pr_err("%s: ICE Version: 0x%08x | ICE FUSE:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING));
+
+	pr_err("%s: ICE Param1: 0x%08x | ICE Param2:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_2));
+
+	pr_err("%s: ICE Param3: 0x%08x | ICE Param4:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_4));
+
+	pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_5),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS));
+
+	pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_CLR));
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
+		pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n",
+			ice_dev->ice_instance_type,
+			qcom_ice_readl(ice_dev,
+				QCOM_ICE_INVALID_CCFG_ERR_STTS));
+	}
+
+	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+		pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts:  0x%08x\n",
+			ice_dev->ice_instance_type,
+			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS),
+			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS));
+	}
+
+	pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP));
+
+	pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS2));
+
+	pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS4));
+
+	pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS2));
+
+	pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS4));
+
+	pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB));
+
+	pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB));
+
+	qcom_ice_dump_test_bus(ice_dev);
+	pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
+			ice_dev->ice_instance_type,
+		(unsigned long long)ice_dev->ice_reset_start_time.tv64,
+		(unsigned long long)ice_dev->ice_reset_complete_time.tv64);
+
+	if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
+				  ice_dev->ice_reset_start_time)) > 0)
+		pr_err("%s: Time taken for reset: %lu\n",
+			ice_dev->ice_instance_type,
+			(unsigned long)ktime_to_us(ktime_sub(
+					ice_dev->ice_reset_complete_time,
+					ice_dev->ice_reset_start_time)));
+out:
+	return;
+}
+
+static int qcom_ice_reset(struct  platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	ice_dev = platform_get_drvdata(pdev);
+	if (!ice_dev) {
+		pr_err("%s: INVALID ice_dev\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev->ice_reset_start_time = ktime_get();
+
+	return qcom_ice_finish_power_collapse(ice_dev);
+}
+
+int qcom_ice_config_start(struct platform_device *pdev,
+		struct request *req,
+		struct ice_data_setting *setting, bool async)
+{
+	struct ice_crypto_setting *crypto_data;
+	struct ice_crypto_setting pfk_crypto_data = {0};
+	union map_info *info;
+	int ret = 0;
+	bool is_pfe = false;
+
+	if (!pdev || !req || !setting) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * It is not an error to have a request with no bio. Such requests
+	 * must bypass ICE, so first set bypass and then return if no bio is
+	 * available in the request.
+	 */
+	setting->encr_bypass = true;
+	setting->decr_bypass = true;
+
+	if (!req->bio) {
+		/* It is not an error to have a request with no bio */
+		return 0;
+	}
+
+	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
+	if (is_pfe) {
+		if (ret) {
+			if (ret != -EBUSY && ret != -EAGAIN)
+				pr_err("%s error %d while configuring ice key for PFE\n",
+						__func__, ret);
+			return ret;
+		}
+
+		return qti_ice_setting_config(req, pdev,
+				&pfk_crypto_data, setting);
+	}
+
+	/*
+	 * The info field in req->end_io_data could be used by multiple dm or
+	 * non-dm entities. To ensure that we operate only on a dm-based
+	 * request, check the BIO_INLINECRYPT flag.
+	 */
+	if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
+		info = dm_get_rq_mapinfo(req);
+		if (!info) {
+			pr_debug("%s info not available in request\n",
+				 __func__);
+			return 0;
+		}
+
+		crypto_data = (struct ice_crypto_setting *)info->ptr;
+		if (!crypto_data) {
+			pr_err("%s crypto_data not available in request\n",
+				 __func__);
+			return -EINVAL;
+		}
+
+		return qti_ice_setting_config(req, pdev,
+				crypto_data, setting);
+	}
+
+	/*
+	 * This is not an error. If the target is not req-crypt based, all
+	 * requests from the storage driver come here to check whether any
+	 * ICE setting is required.
+	 */
+	return 0;
+}
+EXPORT_SYMBOL(qcom_ice_config_start);
+
+int qcom_ice_config_end(struct request *req)
+{
+	int ret = 0;
+	bool is_pfe = false;
+
+	if (!req) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!req->bio) {
+		/* It is not an error to have a request with no bio */
+		return 0;
+	}
+
+	ret = pfk_load_key_end(req->bio, &is_pfe);
+	if (is_pfe) {
+		if (ret != 0)
+			pr_err("%s error %d while end configuring ice key for PFE\n",
+								__func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qcom_ice_config_end);
+
+static int qcom_ice_status(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+	unsigned int test_bus_reg_status;
+
+	if (!pdev) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return -ENODEV;
+
+	if (!ice_dev->is_ice_enabled)
+		return -ENODEV;
+
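+	/*
+	 * Report interrupt status: returns 1 if the test bus reflects a
+	 * pending non-secure ICE interrupt, 0 otherwise.
+	 */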
+	test_bus_reg_status = qcom_ice_readl(ice_dev,
+					QCOM_ICE_REGS_TEST_BUS_REG);
+
+	return !!(test_bus_reg_status & QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR);
+}
+
+struct qcom_ice_variant_ops qcom_ice_ops = {
+	.name             = "qcom",
+	.init             = qcom_ice_init,
+	.reset            = qcom_ice_reset,
+	.resume           = qcom_ice_resume,
+	.suspend          = qcom_ice_suspend,
+	.config_start     = qcom_ice_config_start,
+	.config_end       = qcom_ice_config_end,
+	.status           = qcom_ice_status,
+	.debug            = qcom_ice_debug,
+};
+
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
+{
+	struct platform_device *ice_pdev = NULL;
+	struct ice_device *ice_dev = NULL;
+
+	if (!node) {
+		pr_err("%s: invalid node %p", __func__, node);
+		goto out;
+	}
+
+	if (!of_device_is_available(node)) {
+		pr_err("%s: device unavailable\n", __func__);
+		goto out;
+	}
+
+	if (list_empty(&ice_devices)) {
+		pr_err("%s: invalid device list\n", __func__);
+		ice_pdev = ERR_PTR(-EPROBE_DEFER);
+		goto out;
+	}
+
+	list_for_each_entry(ice_dev, &ice_devices, list) {
+		if (ice_dev->pdev->of_node == node) {
+			pr_info("%s: found ice device %p\n", __func__,
+				ice_dev);
+			ice_pdev = to_platform_device(ice_dev->pdev);
+			pr_info("%s: matching platform device %p\n", __func__,
+				ice_pdev);
+			break;
+		}
+	}
+out:
+	return ice_pdev;
+}
+
+static struct ice_device *get_ice_device_from_storage_type
+					(const char *storage_type)
+{
+	struct ice_device *ice_dev;
+
+	if (list_empty(&ice_devices)) {
+		pr_err("%s: invalid device list\n", __func__);
+		return ERR_PTR(-EPROBE_DEFER);
+	}
+
+	list_for_each_entry(ice_dev, &ice_devices, list) {
+		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
+			pr_info("%s: found ice device %p\n", __func__,
+				ice_dev);
+			return ice_dev;
+		}
+	}
+
+	/* No matching instance; callers treat NULL as "no device" */
+	return NULL;
+}
+
+static int enable_ice_setup(struct ice_device *ice_dev)
+{
+	int ret = -1, vote;
+
+	/* Setup Regulator */
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev)) {
+			pr_err("%s: Could not get regulator\n", __func__);
+			goto out;
+		}
+		ret = regulator_enable(ice_dev->reg);
+		if (ret) {
+			pr_err("%s:%p: Could not enable regulator\n",
+					__func__, ice_dev);
+			goto out;
+		}
+	}
+
+	/* Setup Clocks */
+	if (qcom_ice_enable_clocks(ice_dev, true)) {
+		pr_err("%s:%p:%s Could not enable clocks\n", __func__,
+				ice_dev, ice_dev->ice_instance_type);
+		goto out_reg;
+	}
+
+	/* Setup Bus Vote */
+	vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+	if (vote < 0)
+		goto out_clocks;
+
+	ret = qcom_ice_set_bus_vote(ice_dev, vote);
+	if (ret) {
+		pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+		goto out_clocks;
+	}
+
+	return ret;
+
+out_clocks:
+	qcom_ice_enable_clocks(ice_dev, false);
+out_reg:
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev)) {
+			pr_err("%s: Could not get regulator\n", __func__);
+			goto out;
+		}
+		ret = regulator_disable(ice_dev->reg);
+		if (ret) {
+			pr_err("%s:%pK: Could not disable regulator\n",
+					__func__, ice_dev);
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+static int disable_ice_setup(struct ice_device *ice_dev)
+{
+	int ret = -1, vote;
+
+	/* Setup Bus Vote */
+	vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+	if (vote < 0) {
+		pr_err("%s:%p: Unable to get bus vote\n", __func__, ice_dev);
+		goto out_disable_clocks;
+	}
+
+	ret = qcom_ice_set_bus_vote(ice_dev, vote);
+	if (ret)
+		pr_err("%s:%p: failed %d\n", __func__, ice_dev, ret);
+
+out_disable_clocks:
+
+	/* Setup Clocks */
+	if (qcom_ice_enable_clocks(ice_dev, false))
+		pr_err("%s:%p:%s Could not disable clocks\n", __func__,
+				ice_dev, ice_dev->ice_instance_type);
+
+	/* Setup Regulator */
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev)) {
+			pr_err("%s: Could not get regulator\n", __func__);
+			goto out;
+		}
+		ret = regulator_disable(ice_dev->reg);
+		if (ret) {
+			pr_err("%s:%p: Could not disable regulator\n",
+					__func__, ice_dev);
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	int ret = -1;
+	struct ice_device *ice_dev = NULL;
+
+	ice_dev = get_ice_device_from_storage_type(storage_type);
+	if (ice_dev == ERR_PTR(-EPROBE_DEFER))
+		return -EPROBE_DEFER;
+
+	if (!ice_dev)
+		return ret;
+
+	if (enable)
+		return enable_ice_setup(ice_dev);
+	else
+		return disable_ice_setup(ice_dev);
+}
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
+{
+	return &qcom_ice_ops;
+}
+EXPORT_SYMBOL(qcom_ice_get_variant_ops);
+
+/* The following table is used to match devices from the dts file with this driver */
+static const struct of_device_id qcom_ice_match[] = {
+	{ .compatible = "qcom,ice" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, qcom_ice_match);
+
+static struct platform_driver qcom_ice_driver = {
+	.probe          = qcom_ice_probe,
+	.remove         = qcom_ice_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcom_ice",
+		.of_match_table = qcom_ice_match,
+	},
+};
+module_platform_driver(qcom_ice_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Inline Crypto Engine driver");
diff --git a/drivers/crypto/msm/iceregs.h b/drivers/crypto/msm/iceregs.h
new file mode 100644
index 0000000..4b63e7a
--- /dev/null
+++ b/drivers/crypto/msm/iceregs.h
@@ -0,0 +1,159 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+
+/* Register bits for ICE version */
+#define ICE_CORE_CURRENT_MAJOR_VERSION 0x03
+
+#define ICE_CORE_STEP_REV_MASK		0xFFFF
+#define ICE_CORE_STEP_REV		0 /* bit 15-0 */
+#define ICE_CORE_MAJOR_REV_MASK		0xFF000000
+#define ICE_CORE_MAJOR_REV		24 /* bit 31-24 */
+#define ICE_CORE_MINOR_REV_MASK		0xFF0000
+#define ICE_CORE_MINOR_REV		16 /* bit 23-16 */
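+
+/*
+ * Worked example (illustrative): rev = 0x03010002 decodes to
+ * major = 3, minor = 1, step = 2.
+ */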
+
+#define ICE_BIST_STATUS_MASK		(0xF0000000)	/* bits 28-31 */
+
+#define ICE_FUSE_SETTING_MASK			0x1
+#define ICE_FORCE_HW_KEY0_SETTING_MASK		0x2
+#define ICE_FORCE_HW_KEY1_SETTING_MASK		0x4
+
+/* QCOM ICE Registers from SWI */
+#define QCOM_ICE_REGS_CONTROL			0x0000
+#define QCOM_ICE_REGS_RESET			0x0004
+#define QCOM_ICE_REGS_VERSION			0x0008
+#define QCOM_ICE_REGS_FUSE_SETTING		0x0010
+#define QCOM_ICE_REGS_PARAMETERS_1		0x0014
+#define QCOM_ICE_REGS_PARAMETERS_2		0x0018
+#define QCOM_ICE_REGS_PARAMETERS_3		0x001C
+#define QCOM_ICE_REGS_PARAMETERS_4		0x0020
+#define QCOM_ICE_REGS_PARAMETERS_5		0x0024
+
+
+/* QCOM ICE v3.X only */
+#define QCOM_ICE_GENERAL_ERR_STTS		0x0040
+#define QCOM_ICE_INVALID_CCFG_ERR_STTS		0x0030
+#define QCOM_ICE_GENERAL_ERR_MASK		0x0044
+
+
+/* QCOM ICE v2.X only */
+#define QCOM_ICE_REGS_NON_SEC_IRQ_STTS		0x0040
+#define QCOM_ICE_REGS_NON_SEC_IRQ_MASK		0x0044
+
+
+#define QCOM_ICE_REGS_NON_SEC_IRQ_CLR		0x0048
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1	0x0050
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2	0x0054
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1	0x0058
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2	0x005C
+#define QCOM_ICE_REGS_STREAM1_BIST_ERROR_VEC	0x0060
+#define QCOM_ICE_REGS_STREAM2_BIST_ERROR_VEC	0x0064
+#define QCOM_ICE_REGS_STREAM1_BIST_FINISH_VEC	0x0068
+#define QCOM_ICE_REGS_STREAM2_BIST_FINISH_VEC	0x006C
+#define QCOM_ICE_REGS_BIST_STATUS		0x0070
+#define QCOM_ICE_REGS_BYPASS_STATUS		0x0074
+#define QCOM_ICE_REGS_ADVANCED_CONTROL		0x1000
+#define QCOM_ICE_REGS_ENDIAN_SWAP		0x1004
+#define QCOM_ICE_REGS_TEST_BUS_CONTROL		0x1010
+#define QCOM_ICE_REGS_TEST_BUS_REG		0x1014
+#define QCOM_ICE_REGS_STREAM1_COUNTERS1		0x1100
+#define QCOM_ICE_REGS_STREAM1_COUNTERS2		0x1104
+#define QCOM_ICE_REGS_STREAM1_COUNTERS3		0x1108
+#define QCOM_ICE_REGS_STREAM1_COUNTERS4		0x110C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB	0x1110
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB	0x1114
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB	0x1118
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB	0x111C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB	0x1120
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB	0x1124
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB	0x1128
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB	0x112C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB	0x1130
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB	0x1134
+#define QCOM_ICE_REGS_STREAM2_COUNTERS1		0x1200
+#define QCOM_ICE_REGS_STREAM2_COUNTERS2		0x1204
+#define QCOM_ICE_REGS_STREAM2_COUNTERS3		0x1208
+#define QCOM_ICE_REGS_STREAM2_COUNTERS4		0x120C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB	0x1210
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB	0x1214
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB	0x1218
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB	0x121C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB	0x1220
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB	0x1224
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB	0x1228
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB	0x122C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB	0x1230
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB	0x1234
+
+#define QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE		(1L << 0)
+#define QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE		(1L << 1)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_LBO		(1L << 2)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_LBO		(1L << 3)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUN		(1L << 4)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUN		(1L << 5)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUS		(1L << 6)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUS		(1L << 7)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DBO		(1L << 8)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DBO		(1L << 9)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL		(1L << 10)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL		(1L << 11)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX		(1L << 12)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX		(1L << 13)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS		(1L << 14)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS		(1L << 15)
+
+#define QCOM_ICE_NON_SEC_IRQ_MASK				\
+			(QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE |\
+			 QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_LBO |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_LBO |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_DUN |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_DUN |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_DUS |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_DUS |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_DBO |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_DBO |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS)
+
+/* QCOM ICE registers from secure side */
+#define QCOM_ICE_TEST_BUS_REG_SECURE_INTR            (1L << 28)
+#define QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR        (1L << 2)
+
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_STTS	0x2050
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK	0x2054
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_CLR	0x2058
+
+#define QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED		(1L << 0)
+#define QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED		(1L << 1)
+#define QCOM_ICE_QCOMC_DBG_OPEN_EVENT			(1L << 30)
+#define QCOM_ICE_KEYS_RAM_RESET_COMPLETED		(1L << 31)
+
+#define QCOM_ICE_SEC_IRQ_MASK					  \
+			(QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED |\
+			 QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED |\
+			 QCOM_ICE_QCOMC_DBG_OPEN_EVENT |	  \
+			 QCOM_ICE_KEYS_RAM_RESET_COMPLETED)
+
+
+#define qcom_ice_writel(ice, val, reg)	\
+	writel_relaxed((val), (ice)->mmio + (reg))
+#define qcom_ice_readl(ice, reg)	\
+	readl_relaxed((ice)->mmio + (reg))
+
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_ */
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
new file mode 100644
index 0000000..3a2a51d
--- /dev/null
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -0,0 +1,974 @@
+/* Copyright (c) 2010-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* QTI Over the Air (OTA) Crypto driver */
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cache.h>
+
+
+#include <linux/qcota.h>
+#include "qce.h"
+#include "qce_ota.h"
+
+enum qce_ota_oper_enum {
+	QCE_OTA_F8_OPER   = 0,
+	QCE_OTA_MPKT_F8_OPER = 1,
+	QCE_OTA_F9_OPER  = 2,
+	QCE_OTA_VAR_MPKT_F8_OPER = 3,
+	QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+	struct list_head rlist;
+	struct completion complete;
+	int err;
+	enum qce_ota_oper_enum op;
+	union {
+		struct qce_f9_req f9_req;
+		struct qce_f8_req f8_req;
+		struct qce_f8_multi_pkt_req f8_mp_req;
+		struct qce_f8_varible_multi_pkt_req f8_v_mp_req;
+	} req;
+	unsigned int steps;
+	struct ota_qce_dev  *pqce;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the OTA
+ * crypto engine from userspace.
+ */
+
+
+#define QCOTA_DEV	"qcota"
+
+
+struct ota_dev_control {
+
+	/* misc device */
+	struct miscdevice miscdevice;
+	struct list_head ready_commands;
+	unsigned int magic;
+	struct list_head qce_dev;
+	spinlock_t lock;
+	struct mutex register_lock;
+	bool registered;
+	uint32_t total_units;
+};
+
+struct ota_qce_dev {
+	struct list_head qlist;
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	struct ota_async_req *active_command;
+	struct tasklet_struct done_tasklet;
+	struct ota_dev_control *podev;
+	uint32_t unit;
+	u64 total_req;
+	u64 err_req;
+};
+
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
+
+static const struct file_operations qcota_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcota_ioctl,
+	.open = qcota_open,
+	.release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev = {
+	.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota0",
+			.fops = &qcota_fops,
+	},
+	.magic = OTA_MAGIC,
+};
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+	u64 f8_req;
+	u64 f8_mp_req;
+	u64 f8_v_mp_req;
+	u64 f9_req;
+	u64 f8_op_success;
+	u64 f8_op_fail;
+	u64 f8_mp_op_success;
+	u64 f8_mp_op_fail;
+	u64 f8_v_mp_op_success;
+	u64 f8_v_mp_op_fail;
+	u64 f9_op_success;
+	u64 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota;
+
+static struct ota_dev_control *qcota_control(void)
+{
+
+	return &qcota_dev;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev = qcota_control();
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev =  file->private_data;
+
+	if (podev != NULL && podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
+static bool _next_v_mp_req(struct ota_async_req *areq)
+{
+	unsigned char *p;
+
+	if (areq->err)
+		return false;
+	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
+		return false;
+
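+	/*
+	 * Advance to the next packet: the previous packet ends at
+	 * data_in + data_len and the next one starts at the following
+	 * cache-line-aligned address. The operation is in place, so
+	 * data_in and data_out share the same buffer.
+	 */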
+	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
+	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
+	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
+
+	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_len =
+		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
+
+	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
+	return true;
+}
+
+static void req_done(unsigned long data)
+{
+	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
+	struct ota_dev_control *podev = pqce->podev;
+	struct ota_async_req *areq;
+	unsigned long flags;
+	struct ota_async_req *new_req = NULL;
+	int ret = 0;
+	bool schedule = true;
+
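+	/*
+	 * Complete the active request (or, for a variable multi-packet F8
+	 * request, start its next sub-command), then drain ready_commands
+	 * until a request starts successfully or the queue is empty.
+	 */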
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = pqce->active_command;
+	if (unlikely(areq == NULL))
+		pr_err("ota_crypto: req_done, no active request\n");
+	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
+		if (_next_v_mp_req(areq)) {
+			/* execute next subcommand */
+			spin_unlock_irqrestore(&podev->lock, flags);
+			ret = start_req(pqce, areq);
+			if (unlikely(ret)) {
+				areq->err = ret;
+				schedule = true;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				areq = NULL;
+				schedule = false;
+			}
+		} else {
+			/* done with this variable mp req */
+			schedule = true;
+		}
+	}
+	while (schedule) {
+		if (!list_empty(&podev->ready_commands)) {
+			new_req = container_of(podev->ready_commands.next,
+						struct ota_async_req, rlist);
+			list_del(&new_req->rlist);
+			pqce->active_command = new_req;
+			spin_unlock_irqrestore(&podev->lock, flags);
+
+			new_req->err = 0;
+			/* start a new request */
+			ret = start_req(pqce, new_req);
+			if (unlikely(ret)) {
+				new_req->err = ret;
+				complete(&new_req->complete);
+				ret = 0;
+				new_req = NULL;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				schedule = false;
+			}
+		} else {
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+			schedule = false;
+		}
+	}
+	if (areq)
+		complete(&areq->complete);
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+	areq->req.f9_req.mac_i = *((uint32_t *)icv);
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else
+		areq->err = 0;
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else {
+		areq->err = 0;
+	}
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
+{
+	struct qce_f9_req *pf9;
+	struct qce_f8_multi_pkt_req *p_mp_f8;
+	struct qce_f8_req *pf8;
+	int ret = 0;
+
+	/* the request must already be set as pqce->active_command */
+	areq->pqce = pqce;
+
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		pf8 = &areq->req.f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+	case QCE_OTA_MPKT_F8_OPER:
+		p_mp_f8 = &areq->req.f8_mp_req;
+		ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
+		break;
+
+	case QCE_OTA_F9_OPER:
+		pf9 = &areq->req.f9_req;
+		ret = qce_f9_req(pqce->qce, pf9, areq, f9_cb);
+		break;
+
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+		pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	areq->err = ret;
+	pqce->total_req++;
+	if (ret)
+		pqce->err_req++;
+	return ret;
+}
+
+static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
+{
+	/* must be called with podev->lock held */
+	struct ota_qce_dev *p;
+
+	if (unlikely(list_empty(&podev->qce_dev))) {
+		pr_err("%s: no valid qce to schedule\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		if (p->active_command == NULL)
+			return p;
+	}
+	return NULL;
+}
+
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qcota_stat *pstat;
+	struct ota_qce_dev *pqce;
+
+	areq->err = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	pqce = schedule_qce(podev);
+	if (pqce) {
+		pqce->active_command = areq;
+		spin_unlock_irqrestore(&podev->lock, flags);
+
+		ret = start_req(pqce, areq);
+		if (ret != 0) {
+			spin_lock_irqsave(&podev->lock, flags);
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+		}
+
+	} else {
+		list_add_tail(&areq->rlist, &podev->ready_commands);
+		spin_unlock_irqrestore(&podev->lock, flags);
+	}
+
+	if (ret == 0)
+		wait_for_completion(&areq->complete);
+
+	pstat = &_qcota_stat;
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		if (areq->err)
+			pstat->f8_op_fail++;
+		else
+			pstat->f8_op_success++;
+		break;
+
+	case QCE_OTA_MPKT_F8_OPER:
+
+		if (areq->err)
+			pstat->f8_mp_op_fail++;
+		else
+			pstat->f8_mp_op_success++;
+		break;
+
+	case QCE_OTA_F9_OPER:
+		if (areq->err)
+			pstat->f9_op_fail++;
+		else
+			pstat->f9_op_success++;
+		break;
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+	default:
+		if (areq->err)
+			pstat->f8_v_mp_op_fail++;
+		else
+			pstat->f8_v_mp_op_success++;
+		break;
+	}
+
+	return areq->err;
+}
+
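+/*
+ * Each ioctl copies the user request in, bounces the payload through a
+ * kmalloc'ed kernel buffer, runs the operation synchronously via
+ * submit_req(), and copies the result back to user space.
+ */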
+static long qcota_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct ota_dev_control *podev;
+	uint8_t *user_src;
+	uint8_t *user_dst;
+	uint8_t *k_buf = NULL;
+	struct ota_async_req areq;
+	uint32_t total, temp;
+	struct qcota_stat *pstat;
+	int i;
+	uint8_t *p = NULL;
+
+	podev =  file->private_data;
+	if (podev == NULL || podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&areq.complete);
+
+	pstat = &_qcota_stat;
+
+	switch (cmd) {
+	case QCOTA_F9_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f9_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
+				     sizeof(struct qce_f9_req)))
+			return -EFAULT;
+
+		user_src = areq.req.f9_req.message;
+		if (!access_ok(VERIFY_READ, (void __user *)user_src,
+			       areq.req.f9_req.msize))
+			return -EFAULT;
+
+		if (areq.req.f9_req.msize == 0)
+			return 0;
+		k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		if (__copy_from_user(k_buf, (void __user *)user_src,
+				areq.req.f9_req.msize)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		areq.req.f9_req.message = k_buf;
+		areq.op = QCE_OTA_F9_OPER;
+
+		pstat->f9_req++;
+		err = submit_req(&areq, podev);
+
+		areq.req.f9_req.message = user_src;
+		if (err == 0 && __copy_to_user((void __user *)arg,
+				&areq.req.f9_req, sizeof(struct qce_f9_req))) {
+			err = -EFAULT;
+		}
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
+				     sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		total = areq.req.f8_req.data_len;
+		user_src = areq.req.f8_req.data_in;
+		if (user_src != NULL) {
+			if (!access_ok(VERIFY_READ, (void __user *)
+					user_src, total))
+				return -EFAULT;
+
+		}
+
+		user_dst = areq.req.f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (user_src && __copy_from_user(k_buf,
+				(void __user *)user_src, total)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		if (user_src)
+			areq.req.f8_req.data_in = k_buf;
+		else
+			areq.req.f8_req.data_in = NULL;
+		areq.req.f8_req.data_out = k_buf;
+
+		areq.op = QCE_OTA_F8_OPER;
+
+		pstat->f8_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+
+		break;
+
+	case QCOTA_F8_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+				     sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		temp = areq.req.f8_mp_req.qce_f8_req.data_len;
+		if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
+				 areq.req.f8_mp_req.cipher_size)
+			return -EINVAL;
+		total = (uint32_t) areq.req.f8_mp_req.num_pkt *
+				areq.req.f8_mp_req.qce_f8_req.data_len;
+
+		user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+		if (!access_ok(VERIFY_READ, (void __user *)
+				user_src, total))
+			return -EFAULT;
+
+		user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
+			kfree(k_buf);
+
+			return -EFAULT;
+		}
+
+		areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+		areq.op = QCE_OTA_MPKT_F8_OPER;
+
+		pstat->f8_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_V_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qce_f8_varible_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
+				sizeof(struct qce_f8_varible_multi_pkt_req)))
+			return -EFAULT;
+
+		if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
+			return -EINVAL;
+
+		for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			if (!access_ok(VERIFY_WRITE, (void __user *)
+				areq.req.f8_v_mp_req.cipher_iov[i].addr,
+				areq.req.f8_v_mp_req.cipher_iov[i].size))
+				return -EFAULT;
+			total += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			total = ALIGN(total, L1_CACHE_BYTES);
+		}
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_src =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (__copy_from_user(p, (void __user *)user_src,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+
+		areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_len =
+			areq.req.f8_v_mp_req.cipher_iov[0].size;
+		areq.steps = 0;
+		areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
+
+		pstat->f8_v_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err != 0) {
+			kfree(k_buf);
+			return err;
+		}
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_dst =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (__copy_to_user(user_dst, p,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+		kfree(k_buf);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
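+/*
+ * Bind one QCE engine instance to the shared qcota device: open the
+ * engine, check that it supports OTA operations, register the misc
+ * device on the first probe, and add the engine to the scheduling list.
+ */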
+static int qcota_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct ota_dev_control *podev;
+	struct ce_hw_support ce_support;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	podev = &qcota_dev;
+	pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
+	if (!pqce) {
+		pr_err("qcota_probe: Memory allocation FAIL\n");
+		return -ENOMEM;
+	}
+
+	pqce->podev = podev;
+	pqce->active_command = NULL;
+	tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		pr_err("%s: device %s, can not open qce\n",
+			__func__, pdev->name);
+		goto err;
+	}
+	if (qce_hw_support(handle, &ce_support) < 0 ||
+					ce_support.ota == false) {
+		pr_err("%s: device %s, qce does not support ota capability\n",
+			__func__, pdev->name);
+		rc = -ENODEV;
+		goto err;
+	}
+	pqce->qce = handle;
+	pqce->pdev = pdev;
+	pqce->total_req = 0;
+	pqce->err_req = 0;
+	platform_set_drvdata(pdev, pqce);
+
+	mutex_lock(&podev->register_lock);
+	rc = 0;
+	if (podev->registered == false) {
+		rc = misc_register(&podev->miscdevice);
+		if (rc == 0) {
+			pqce->unit = podev->total_units;
+			podev->total_units++;
+			podev->registered = true;
+		}
+	} else {
+		pqce->unit = podev->total_units;
+		podev->total_units++;
+	}
+	mutex_unlock(&podev->register_lock);
+	if (rc) {
+		pr_err("qcota: failed to register misc device\n");
+		goto err;
+	}
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_add_tail(&pqce->qlist, &podev->qce_dev);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return 0;
+err:
+	if (handle)
+		qce_close(handle);
+
+	platform_set_drvdata(pdev, NULL);
+	tasklet_kill(&pqce->done_tasklet);
+	kfree(pqce);
+	return rc;
+}
+
+static int qcota_remove(struct platform_device *pdev)
+{
+	struct ota_dev_control *podev;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	pqce = platform_get_drvdata(pdev);
+	if (!pqce)
+		return 0;
+	if (pqce->qce)
+		qce_close(pqce->qce);
+
+	podev = pqce->podev;
+	if (!podev)
+		goto ret;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_del(&pqce->qlist);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	mutex_lock(&podev->register_lock);
+	if (--podev->total_units == 0) {
+		if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+			misc_deregister(&podev->miscdevice);
+		podev->registered = false;
+	}
+	mutex_unlock(&podev->register_lock);
+ret:
+
+	tasklet_kill(&pqce->done_tasklet);
+	kfree(pqce);
+	return 0;
+}
+
+static const struct of_device_id qcota_match[] = {
+	{	.compatible = "qcom,qcota",
+	},
+	{}
+};
+
+static struct platform_driver qcota_plat_driver = {
+	.probe = qcota_probe,
+	.remove = qcota_remove,
+	.driver = {
+		.name = "qcota",
+		.owner = THIS_MODULE,
+		.of_match_table = qcota_match,
+	},
+};
+
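+/* Format the per-operation and per-engine statistics into _debug_read_buf. */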
+static int _disp_stats(void)
+{
+	struct qcota_stat *pstat;
+	int len = 0;
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	pstat = &_qcota_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI OTA crypto accelerator Statistics:\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 request                      : %llu\n",
+					pstat->f8_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation success            : %llu\n",
+					pstat->f8_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation fail               : %llu\n",
+					pstat->f8_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP request                   : %llu\n",
+					pstat->f8_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation success         : %llu\n",
+					pstat->f8_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation fail            : %llu\n",
+					pstat->f8_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP request          : %llu\n",
+					pstat->f8_v_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation success: %llu\n",
+					pstat->f8_v_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation fail   : %llu\n",
+					pstat->f8_v_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 request                      : %llu\n",
+					pstat->f9_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation success            : %llu\n",
+					pstat->f9_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation fail               : %llu\n",
+					pstat->f9_op_fail);
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req                 : %llu\n",
+			p->unit,
+			p->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error           : %llu\n",
+			p->unit,
+			p->err_req
+		);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int len;
+
+	len = _disp_stats();
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		p->total_req = 0;
+		p->err_req = 0;
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcota", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcota debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
+	_debug_qcota = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcota, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcota debugfs_create_file fail\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init qcota_init(void)
+{
+	int rc;
+	struct ota_dev_control *podev;
+
+	rc = _qcota_debug_init();
+	if (rc)
+		return rc;
+
+	podev = &qcota_dev;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	INIT_LIST_HEAD(&podev->qce_dev);
+	spin_lock_init(&podev->lock);
+	mutex_init(&podev->register_lock);
+	podev->registered = false;
+	podev->total_units = 0;
+
+	return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Ota Crypto driver");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
new file mode 100644
index 0000000..7b4ca24
--- /dev/null
+++ b/drivers/crypto/msm/qce.h
@@ -0,0 +1,191 @@
+/*
+ * QTI Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size  in bytes */
+#define SHA256_DIGESTSIZE		32
+#define SHA1_DIGESTSIZE			20
+
+#define AES_CE_BLOCK_SIZE		16
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE			(SHA1_DIGESTSIZE)    /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE		64
+#define DES_KEY_SIZE			8
+#define TRIPLE_DES_KEY_SIZE		24
+#define AES128_KEY_SIZE			16
+#define AES192_KEY_SIZE			24
+#define AES256_KEY_SIZE			32
+#define MAX_CIPHER_KEY_SIZE		AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH			16
+#define DES_IV_LENGTH                   8
+#define MAX_IV_LENGTH			AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer */
+#define QCE_MAX_OPER_DATA		0xFF00
+
+/* Maximum Nonce bytes  */
+#define MAX_NONCE  16
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+		unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+	CIPHER_ALG_DES = 0,
+	CIPHER_ALG_3DES = 1,
+	CIPHER_ALG_AES = 2,
+	CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+	QCE_HASH_SHA1   = 0,
+	QCE_HASH_SHA256 = 1,
+	QCE_HASH_SHA1_HMAC   = 2,
+	QCE_HASH_SHA256_HMAC = 3,
+	QCE_HASH_AES_CMAC = 4,
+	QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+	QCE_ENCRYPT = 0,
+	QCE_DECRYPT = 1,
+	QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+	QCE_MODE_CBC = 0,
+	QCE_MODE_ECB = 1,
+	QCE_MODE_CTR = 2,
+	QCE_MODE_XTS = 3,
+	QCE_MODE_CCM = 4,
+	QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+	QCE_REQ_ABLK_CIPHER = 0,
+	QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+	QCE_REQ_AEAD = 2,
+	QCE_REQ_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+	bool sha1_hmac_20; /* supports 20-byte HMAC key */
+	bool sha1_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha256_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha_hmac; /* supports SHA1 and SHA256 max HMAC key of 64 bytes */
+	bool cmac;
+	bool aes_key_192;
+	bool aes_xts;
+	bool aes_ccm;
+	bool ota;
+	bool aligned_only;
+	bool bam;
+	bool is_shared;
+	bool hw_key;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	bool clk_mgmt_sus_res;
+	unsigned int ce_device;
+	unsigned int ce_hw_instance;
+	unsigned int max_request;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+	qce_comp_func_ptr_t qce_cb;	/* callback */
+	enum qce_hash_alg_enum alg;	/* sha algorithm */
+	unsigned char *digest;		/* sha digest  */
+	struct scatterlist *src;	/* pointer to scatter list entry */
+	uint32_t  auth_data[4];		/* byte count */
+	unsigned char *authkey;		/* auth key */
+	unsigned int  authklen;		/* auth key length */
+	bool first_blk;			/* first block indicator */
+	bool last_blk;			/* last block indicator */
+	unsigned int size;		/* data length in bytes */
+	void *areq;
+	unsigned int  flags;
+};
+
+struct qce_req {
+	enum qce_req_op_enum op;	/* operation type */
+	qce_comp_func_ptr_t qce_cb;	/* callback */
+	void *areq;
+	enum qce_cipher_alg_enum   alg;	/* cipher algorithms*/
+	enum qce_cipher_dir_enum dir;	/* encryption? decryption? */
+	enum qce_cipher_mode_enum mode;	/* algorithm mode  */
+	enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
+	unsigned char *authkey;		/* authentication key  */
+	unsigned int authklen;		/* authentication key length */
+	unsigned int authsize;		/* authentication digest size */
+	unsigned char  nonce[MAX_NONCE];/* nonce for ccm mode */
+	unsigned char *assoc;		/* Ptr to formatted associated data */
+	unsigned int assoclen;		/* Formatted associated data length  */
+	struct scatterlist *asg;	/* Formatted associated data sg  */
+	unsigned char *enckey;		/* cipher key  */
+	unsigned int encklen;		/* cipher key length */
+	unsigned char *iv;		/* initialization vector */
+	unsigned int ivsize;		/* initialization vector size */
+	unsigned int cryptlen;		/* data length */
+	unsigned int use_pmem;		/* is source of data PMEM allocated? */
+	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure */
+	unsigned int  flags;
+};
+
+struct qce_pm_table {
+	int (*suspend)(void *handle);
+	int (*resume)(void *handle);
+};
+
+extern struct qce_pm_table qce_pm_table;
+
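+/*
+ * Driver entry points. A typical client opens an engine instance,
+ * queries its capabilities, submits requests, and closes the handle on
+ * teardown. Minimal sketch (assuming a valid platform_device *pdev):
+ *
+ *	int rc;
+ *	struct ce_hw_support support;
+ *	void *handle = qce_open(pdev, &rc);
+ *
+ *	if (handle && qce_hw_support(handle, &support) == 0) {
+ *		... submit qce_process_sha_req() / qce_ablk_cipher_req() ...
+ *		qce_close(handle);
+ *	}
+ */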
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
+void qce_get_driver_stats(void *handle);
+void qce_clear_driver_stats(void *handle);
+
+#endif /* __CRYPTO_MSM_QCE_H */
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
new file mode 100644
index 0000000..0cf4386
--- /dev/null
+++ b/drivers/crypto/msm/qce50.c
@@ -0,0 +1,6141 @@
+/*
+ * QTI Crypto Engine driver.
+ *
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <linux/clk/qcom.h>
+#include <linux/qcrypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <soc/qcom/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+#include "qce_ota.h"
+
+#define CRYPTO_CONFIG_RESET 0xE01EF
+#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
+#define QCE_MAX_NUM_DSCR    0x200
+#define QCE_SECTOR_SIZE	    0x200
+#define CE_CLK_100MHZ	100000000
+#define CE_CLK_DIV	1000000
+
+#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
+#define CRYPTO_CORE_MINOR_VER_NUM 0x03
+#define CRYPTO_CORE_STEP_VER_NUM 0x1
+
+#define CRYPTO_REQ_USER_PAT 0xdead0000
+
+static DEFINE_MUTEX(bam_register_lock);
+static DEFINE_MUTEX(qce_iomap_mutex);
+
+struct bam_registration_info {
+	struct list_head qlist;
+	unsigned long handle;
+	uint32_t cnt;
+	uint32_t bam_mem;
+	void __iomem *bam_iobase;
+	bool support_cmd_dscr;
+};
+static LIST_HEAD(qce50_bam_list);
+
+/* Threshold of queued requests used to switch into bunch mode */
+#define MAX_BUNCH_MODE_REQ 2
+/* Maximum number of requests supported */
+#define MAX_QCE_BAM_REQ 8
+/* Interrupt flag will be set for every SET_INTR_AT_REQ request */
+#define SET_INTR_AT_REQ			(MAX_QCE_BAM_REQ / 2)
+/* To create extra request space to hold dummy request */
+#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ	(MAX_QCE_BAM_REQ + 1)
+/* Allocate the memory for MAX_QCE_BAM_REQ  + 1 (for dummy request) */
+#define MAX_QCE_ALLOC_BAM_REQ		MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
+/* QCE driver modes */
+#define IN_INTERRUPT_MODE 0
+#define IN_BUNCH_MODE 1
+/* Dummy request data length */
+#define DUMMY_REQ_DATA_LEN 64
+/* Delay timer to expire when in bunch mode */
+#define DELAY_IN_JIFFIES 5
+/* Index to point the dummy request */
+#define DUMMY_REQ_INDEX			MAX_QCE_BAM_REQ
+
+#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
+
+enum qce_owner {
+	QCE_OWNER_NONE   = 0,
+	QCE_OWNER_CLIENT = 1,
+	QCE_OWNER_TIMEOUT = 2
+};
+
+struct dummy_request {
+	struct qce_sha_req sreq;
+	struct scatterlist sg;
+	struct ahash_request areq;
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at a time. It is up to
+ * the SW above to ensure single-threaded operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	struct bam_registration_info *pbam;
+
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	int memsize;				/* Memory allocated */
+	unsigned char *iovec_vmem;  /* Allocated iovec virtual memory */
+	int iovec_memsize;				/* Memory allocated */
+	uint32_t bam_mem;		/* bam physical address, from DT */
+	uint32_t bam_mem_size;		/* bam io size, from DT */
+	int is_shared;			/* CE HW is shared */
+	bool support_cmd_dscr;
+	bool support_hw_key;
+	bool support_clk_mgmt_sus_res;
+	bool support_only_core_src_clk;
+
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+
+	struct clk *ce_core_src_clk;	/* Handle to CE src clk*/
+	struct clk *ce_core_clk;	/* Handle to CE clk */
+	struct clk *ce_clk;		/* Handle to CE clk */
+	struct clk *ce_bus_clk;	/* Handle to CE AXI clk*/
+	bool no_get_around;
+	bool no_ccm_mac_status_get_around;
+	unsigned int ce_opp_freq_hz;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	uint32_t engines_avail;
+	struct qce_ce_cfg_reg_setting reg;
+	struct ce_bam_info ce_bam_info;
+	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
+	unsigned int ce_request_index;
+	enum qce_owner owner;
+	atomic_t no_of_queued_req;
+	struct timer_list timer;
+	struct dummy_request dummyreq;
+	unsigned int mode;
+	unsigned int intr_cadence;
+	unsigned int dev_no;
+	struct qce_driver_stats qce_stats;
+	atomic_t bunch_cmd_seq;
+	atomic_t last_intr_seq;
+	bool cadence_flag;
+	uint8_t *dummyreq_in_buf;
+};
+
+static void print_notify_debug(struct sps_event_notify *notify);
+static void _sps_producer_callback(struct sps_event_notify *notify);
+static int qce_dummy_req(struct qce_device *pce_dev);
+
+static int _qce50_disp_stats;
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
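+/*
+ * Pack a byte stream into 32-bit words in network (big endian) order;
+ * a trailing partial word is left-justified and zero-padded.
+ */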
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n;
+
+	n = len  / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
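+/*
+ * Byte-reverse the stream into an AES_IV_LENGTH buffer (right-justified,
+ * zero-padded at the front) before converting to network-order words;
+ * used for XTS IVs.
+ */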
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
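+/* Count the scatterlist entries needed to cover nbytes of data. */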
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
+static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_map_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
+
+static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_unmap_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
+
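+/*
+ * Read and validate the crypto core version, and derive the HW
+ * workaround flags from the minor/step revision.
+ */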
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+
+	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	/*
+	 * Ensure the version register read above has completed
+	 * before checking the version fields.
+	 */
+	mb();
+	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+	if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
+		pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	}
+
+	/*
+	 * The majority of crypto HW bugs have been fixed in 5.3.0 and
+	 * above. That allows a single sps transfer of consumer
+	 * pipe, and a single sps transfer of producer pipe
+	 * for a crypto request. no_get_around flag indicates this.
+	 *
+	 * In 5.3.1, the CCM MAC_FAILED in result dump issue is
+	 * fixed. no_ccm_mac_status_get_around flag indicates this.
+	 */
+	pce_dev->no_get_around = (min_rev >= CRYPTO_CORE_MINOR_VER_NUM);
+	if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
+			 (step_rev >= CRYPTO_CORE_STEP_VER_NUM))
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else
+		pce_dev->no_ccm_mac_status_get_around = false;
+
+	pce_dev->ce_bam_info.minor_version = min_rev;
+
+	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
+					CRYPTO_ENGINES_AVAIL);
+	dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
+			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+	dev_info(pce_dev->pdev, "CE device = 0x%x, IO base CE = 0x%p, Consumer (IN) PIPE %d, Producer (OUT) PIPE %d, IO base BAM = 0x%p, BAM IRQ %d, Engines Availability = 0x%x\n",
+			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
+			pce_dev->ce_bam_info.dest_pipe_index,
+			pce_dev->ce_bam_info.src_pipe_index,
+			pce_dev->ce_bam_info.bam_iobase,
+			pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
+	return 0;
+}
+
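+/* Select the prebuilt command list matching the requested hash algorithm. */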
+static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_sha_req *sreq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		return &cmdlistptr->auth_sha1;
+	case QCE_HASH_SHA256:
+		return &cmdlistptr->auth_sha256;
+	case QCE_HASH_SHA1_HMAC:
+		return &cmdlistptr->auth_sha1_hmac;
+	case QCE_HASH_SHA256_HMAC:
+		return &cmdlistptr->auth_sha256_hmac;
+	case QCE_HASH_AES_CMAC:
+		if (sreq->authklen == AES128_KEY_SIZE)
+			return &cmdlistptr->auth_aes_128_cmac;
+		return &cmdlistptr->auth_aes_256_cmac;
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
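+/*
+ * Patch the selected hash command list in place with the keys, IVs,
+ * byte counts, and segment configuration for this request.
+ */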
+static int _ce_setup_hash(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	bool sha1 = false;
+	struct sps_command_element *pce = NULL;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	uint32_t auth_cfg;
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+
+		/* no more check for null key. use flag */
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
+						== QCRYPTO_CTX_USE_HW_KEY)
+			use_hw_key = true;
+		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+		pce = cmdlistinfo->go_proc;
+		if (use_hw_key == true) {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+							pce_dev->phy_iobase);
+		} else {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+							pce_dev->phy_iobase);
+			pce = cmdlistinfo->auth_key;
+			if (use_pipe_key == false) {
+				_byte_stream_to_net_words(mackey32,
+						sreq->authkey,
+						sreq->authklen);
+				for (i = 0; i < authk_size_in_word; i++, pce++)
+					pce->data = mackey32[i];
+			}
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = auth32[i];
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++, pce++)
+			pce->data = auth32[i];
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = sreq->auth_data[i];
+
+	/* Set/reset  last bit in CFG register  */
+	pce = cmdlistinfo->auth_seg_cfg;
+	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
+				1 << CRYPTO_FIRST |
+				1 << CRYPTO_USE_PIPE_KEY_AUTH |
+				1 << CRYPTO_USE_HW_KEY_AUTH);
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+	pce->data = auth_cfg;
+go_proc:
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = sreq->size;
+
+	pce = cmdlistinfo->encr_seg_cfg;
+	pce->data = 0;
+
+	/* write auth seg size start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+
+	/* always ensure there is input data. ZLT does not work for bam-ndp */
+	if (sreq->size)
+		pce->data = sreq->size;
+	else
+		pce->data = pce_dev->ce_bam_info.ce_burst_size;
+
+	return 0;
+}
+
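+/* Select the prebuilt AEAD command list for the cipher/auth algorithm pair. */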
+static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_3des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_3des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_AES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->encklen ==  AES128_KEY_SIZE) {
+				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128;
+				else if (creq->auth_alg ==
+						QCE_HASH_SHA256_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128;
+				else
+					return NULL;
+			} else if (creq->encklen ==  AES256_KEY_SIZE) {
+				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256;
+				else if (creq->auth_alg ==
+						QCE_HASH_SHA256_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256;
+				else
+					return NULL;
+			} else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
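+/*
+ * Patch the AEAD command list: cipher IV and key, HMAC key and standard
+ * hash IV, direction-dependent auth position, and segment sizes/offsets.
+ */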
+static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	struct sps_command_element *pce;
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = q_req->ivsize;
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	switch (q_req->alg) {
+	case CIPHER_ALG_DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_3DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_AES:
+		if ((key_size != AES128_KEY_SIZE) &&
+				(key_size != AES256_KEY_SIZE))
+			return -EINVAL;
+		enciv_in_word = 4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* only support cbc mode */
+	if (q_req->mode != QCE_MODE_CBC)
+		return -EINVAL;
+
+	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+	pce = cmdlistinfo->encr_cntr_iv;
+	for (i = 0; i < enciv_in_word; i++, pce++)
+		pce->data = enciv32[i];
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < enck_size_in_word; i++, pce++)
+		pce->data = enckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	encr_cfg = pce->data;
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	else
+		encr_cfg &= ~(1 << CRYPTO_ENCODE);
+	pce->data = encr_cfg;
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	pce = cmdlistinfo->auth_key;
+	for (i = 0; i < authk_size_in_word; i++, pce++)
+		pce->data = mackey32[i];
+	pce = cmdlistinfo->auth_iv;
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		for (i = 0; i < 5; i++, pce++)
+			pce->data = _std_init_vector_sha1[i];
+	else
+		for (i = 0; i < 8; i++, pce++)
+			pce->data = _std_init_vector_sha256[i];
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = 0;
+
+	pce = cmdlistinfo->auth_seg_cfg;
+	a_cfg = pce->data;
+	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+	pce->data = a_cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = totallen_in;
+
+	/* write auth seg size start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = q_req->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	return 0;
+
+}
+
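+/* Select the prebuilt cipher command list for the algorithm, mode, and key size. */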
+static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	if (creq->alg != CIPHER_ALG_AES) {
+		switch (creq->alg) {
+		case CIPHER_ALG_DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_des_ecb;
+			return &cmdlistptr->cipher_des_cbc;
+		case CIPHER_ALG_3DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_3des_ecb;
+			return &cmdlistptr->cipher_3des_cbc;
+		default:
+			return NULL;
+		}
+	} else {
+		switch (creq->mode) {
+		case QCE_MODE_ECB:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_ecb;
+			return &cmdlistptr->cipher_aes_256_ecb;
+		case QCE_MODE_CBC:
+		case QCE_MODE_CTR:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_cbc_ctr;
+			return &cmdlistptr->cipher_aes_256_cbc_ctr;
+		case QCE_MODE_XTS:
+			if (creq->encklen/2 == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_xts;
+			return &cmdlistptr->cipher_aes_256_xts;
+		case QCE_MODE_CCM:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->aead_aes_128_ccm;
+			return &cmdlistptr->aead_aes_256_ccm;
+		default:
+			return NULL;
+		}
+	}
+	return NULL;
+}
+
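+/*
+ * Patch the cipher command list for this request: keys (unless HW or
+ * pipe keys are in use), IVs/counters, mode-specific settings such as
+ * the XTS DU size and CCM nonce, and segment sizes/offsets.
+ */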
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+	struct sps_command_element *pce = NULL;
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	pce = cmdlistinfo->go_proc;
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	pce = cmdlistinfo->go_proc;
+	if (use_hw_key == true)
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+						pce_dev->phy_iobase);
+	else
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+						pce_dev->phy_iobase);
+	if ((use_pipe_key == false) && (use_hw_key == false)) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		pce = cmdlistinfo->auth_nonce_info;
+		for (i = 0; i < noncelen32; i++, pce++)
+			pce->data = nonce32[i];
+
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		}
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key == true)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authklen32; i++, pce++)
+				pce->data = enckey32[i];
+		}
+
+		pce = cmdlistinfo->auth_seg_cfg;
+		pce->data = auth_cfg;
+
+		pce = cmdlistinfo->auth_seg_size;
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data = totallen_in;
+		else
+			pce->data = totallen_in - creq->authsize;
+		pce = cmdlistinfo->auth_seg_start;
+		pce->data = 0;
+	} else {
+		if (creq->op != QCE_REQ_AEAD) {
+			pce = cmdlistinfo->auth_seg_cfg;
+			pce->data = 0;
+		}
+	}
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			pce = cmdlistinfo->encr_key;
+			pce->data = enckey32[0];
+			pce++;
+			pce->data = enckey32[1];
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			pce = cmdlistinfo->encr_key;
+			for (i = 0; i < 6; i++, pce++)
+				pce->data = enckey32[i];
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				pce = cmdlistinfo->encr_xts_key;
+				for (i = 0; i < xtsklen; i++, pce++)
+					pce->data = xtskey32[i];
+			}
+			/* write xts du size */
+			pce = cmdlistinfo->encr_xts_du_size;
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
+						creq->cryptlen);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				pce->data =
+					min((unsigned int)QCE_SECTOR_SIZE * 2,
+					creq->cryptlen);
+				break;
+			default:
+				pce->data = creq->cryptlen;
+				break;
+			}
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			/* write encr cntr iv */
+			pce = cmdlistinfo->encr_cntr_iv;
+			for (i = 0; i < 4; i++, pce++)
+				pce->data = enciv32[i];
+
+			if (creq->mode ==  QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				pce = cmdlistinfo->encr_ccm_cntr_iv;
+				for (i = 0; i < 4; i++, pce++)
+					pce->data = enciv32[i];
+				/* update cntr_iv[3] by one */
+				pce = cmdlistinfo->encr_cntr_iv;
+				pce += 3;
+				pce->data += 1;
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if (use_hw_key == false) {
+				/* write encr key */
+				pce = cmdlistinfo->encr_key;
+				for (i = 0; i < enck_size_in_word; i++, pce++)
+					pce->data = enckey32[i];
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->mode)  */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data |= (1 << CRYPTO_ENCODE);
+		else
+			pce->data &= ~(1 << CRYPTO_ENCODE);
+		encr_cfg = pce->data;
+	} else {
+		encr_cfg |=
+			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	}
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	pce->data = encr_cfg;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+		pce->data = (creq->cryptlen + creq->authsize);
+	else
+		pce->data = creq->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	return 0;
+}
+
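+/*
+ * Program the F9 (integrity) command list: integrity key, FRESH and
+ * COUNT-I values, direction bit, and message size.
+ */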
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ikey32[i];
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	pce->data = req->last_bits;
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	pce = cmdlistinfo->auth_bytecount;
+	pce->data = req->fresh;
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	pce++;
+	pce->data = req->count_i;
+
+	/* write auth seg cfg */
+	pce = cmdlistinfo->auth_seg_cfg;
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F9_DIRECTION);
+	pce->data = cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = req->msize;
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->msize;
+
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+	return 0;
+}
+
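+/*
+ * Program the F8 (ciphering) command list: cipher key, COUNT-C, the
+ * bearer/packet-count IV word, direction and keystream bits, and the
+ * cipher offset and size within the data.
+ */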
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+		uint16_t cipher_size,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if (key_stream_mode)
+		cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F8_DIRECTION);
+	pce->data = cfg;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (cipher_offset & 0xffff);
+
+	/* write encr seg size  */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = cipher_size;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->data_len;
+
+	/* write cntr0_iv0 for countC */
+	pce = cmdlistinfo->encr_cntr_iv;
+	pce->data = req->count_c;
+	/* write cntr1_iv1 for nPkts, and bearer */
+	pce++;
+	if (npkts == 1)
+		npkts = 0;
+	pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+
+	return 0;
+}
+
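+/* Dump the consumer and producer SPS descriptor FIFOs, expanding command elements. */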
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
+{
+	int i, j, ents;
+	struct ce_sps_data *pce_sps_data;
+	struct sps_iovec *iovec;
+	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	iovec = pce_sps_data->in_transfer.iovec;
+	pr_info("==============================================\n");
+	pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	for (i = 0; i <  pce_sps_data->in_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+					iovec->addr, iovec->size, iovec->flags);
+		if (iovec->flags & cmd_flags) {
+			struct sps_command_element *pced;
+
+			pced = (struct sps_command_element *)
+					(GET_VIRT_ADDR(iovec->addr));
+			ents = iovec->size/(sizeof(struct sps_command_element));
+			for (j = 0; j < ents; j++) {
+				pr_info("      [%d] [0x%x] 0x%x\n", j,
+					pced->addr, pced->data);
+				pced++;
+			}
+		}
+		iovec++;
+	}
+
+	pr_info("==============================================\n");
+	pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	iovec =  pce_sps_data->out_transfer.iovec;
+	for (i = 0; i <   pce_sps_data->out_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+				iovec->addr, iovec->size, iovec->flags);
+		iovec++;
+	}
+}
+
+#ifdef QCE_DEBUG
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+	_qce_dump_descr_fifos(pce_dev, req_info);
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+{									\
+	pr_info("      [0x%p] 0x%x\n", addr, (uint32_t)val);		\
+	writel_relaxed(val, addr);					\
+}
+
+#else
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+	writel_relaxed(val, addr)
+
+#endif
+
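+/*
+ * Register-programmed (non-command-list) variant of the hash setup,
+ * used when command descriptors are not supported.
+ */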
+static int _ce_setup_hash_direct(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	bool sha1 = false;
+	uint32_t auth_cfg = 0;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure previous instructions (setting the CONFIG register)
+	 * were completed before starting to set the other config registers.
+	 * This ensures the configurations are done in the correct endianness
+	 * as set in the CONFIG register.
+	 */
+	mb();
+
+	if (sreq->alg == QCE_HASH_AES_CMAC) {
+		/* write seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+		/* write seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+		/* write seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+		if (sreq->authklen == AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
+		else
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
+	}
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+		_byte_stream_to_net_words(mackey32, sreq->authkey,
+						sreq->authklen);
+
+		/* no more check for null key. use flag to check*/
+
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
+					QCRYPTO_CTX_USE_HW_KEY) {
+			use_hw_key = true;
+		} else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY) {
+			use_pipe_key = true;
+		} else {
+			/* setup key */
+			for (i = 0; i < authk_size_in_word; i++)
+				QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
+					(CRYPTO_AUTH_KEY0_REG +
+							i*sizeof(uint32_t))));
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		auth_cfg = pce_dev->reg.auth_cfg_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+		auth_cfg = pce_dev->reg.auth_cfg_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	/* write the auth_iv registers */
+	for (i = 0; i < 5; i++)
+		QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++)
+			QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	for (i = 0; i < 2; i++)
+		QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
+					CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+	/* Set/reset  last bit in CFG register  */
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_LAST);
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_FIRST);
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+go_proc:
+	/* write auth seg cfg */
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* reset encr seg_cfg   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	if (use_hw_key == false) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure the previous write (setting the GO register) has
+	 * completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
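+
+/*
+ * Note: the direct-setup helpers in this file follow a common register
+ * sequence: clear STATUS, switch CONFIG to big-endian, program the
+ * auth/encr segment registers, switch CONFIG back to little-endian,
+ * then set the GO bit; mb() barriers fence the endianness switch and
+ * the GO write where ordering matters.
+ */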
+
+static int _ce_setup_aead_direct(struct qce_device *pce_dev,
+		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t ivsize = q_req->ivsize;
+	uint32_t encr_cfg;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register) has
+	 * completed before starting to set the other config registers.
+	 * This guarantees the configuration is done with the endianness
+	 * selected in the CONFIG register.
+	 */
+	mb();
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	switch (q_req->alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+		break;
+
+	case CIPHER_ALG_3DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			if (key_size == AES128_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+			else if (key_size  == AES256_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+			else
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* write CNTR0_IV0_REG */
+	if (q_req->mode !=  QCE_MODE_ECB) {
+		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+		for (i = 0; i < enciv_in_word; i++)
+			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	for (i = 0; i < enck_size_in_word; i++)
+		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
+
+	/* write encr seg cfg */
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	for (i = 0; i < authk_size_in_word; i++)
+		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
+			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
+		for (i = 0; i < 5; i++)
+			QCE_WRITE_REG(_std_init_vector_sha1[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	} else {
+		for (i = 0; i < 8; i++)
+			QCE_WRITE_REG(_std_init_vector_sha256[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write encr seg size    */
+	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write encr start   */
+	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_START_REG);
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
+	else
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;
+
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* write auth seg_cfg */
+	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg_size   */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* write seg_size   */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous write (setting the GO register) has
+	 * completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
+		struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register) has
+	 * completed before starting to set the other config registers.
+	 * This guarantees the configuration is done with the endianness
+	 * selected in the CONFIG register.
+	 */
+	mb();
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	if ((use_pipe_key == false) && (use_hw_key == false)) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		for (i = 0; i < noncelen32; i++)
+			QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
+				CRYPTO_AUTH_INFO_NONCE0_REG +
+					(i*sizeof(uint32_t)));
+
+		if (creq->authklen ==  AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else {
+			if (creq->authklen ==  AES256_KEY_SIZE)
+				auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		}
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key == true) {
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			for (i = 0; i < authklen32; i++)
+				QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+		}
+		QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+		if (creq->dir == QCE_ENCRYPT) {
+			QCE_WRITE_REG(totallen_in, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_SIZE_REG);
+		} else {
+			QCE_WRITE_REG((totallen_in - creq->authsize),
+				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+		}
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	} else {
+		if (creq->op != QCE_REQ_AEAD)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+	}
+	/*
+	 * Ensure previous instructions (write to all AUTH registers)
+	 * was completed before accessing a register that is not in
+	 * in the same 1K range.
+	 */
+	mb();
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
+		}
+		if (use_hw_key == false) {
+			QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG);
+			QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
+							CRYPTO_ENCR_KEY1_REG);
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			for (i = 0; i < 6; i++)
+				QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				for (i = 0; i < xtsklen; i++)
+					QCE_WRITE_REG(xtskey32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_XTS_KEY0_REG +
+						(i * sizeof(uint32_t)));
+			}
+			/* write xts du size */
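+			/*
+			 * The data-unit size bounds each XTS tweak span;
+			 * capping at min(DU size, cryptlen) keeps requests
+			 * smaller than one data unit working, e.g. a 256
+			 * byte request with a 512B DU is programmed as 256.
+			 */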
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				QCE_WRITE_REG(
+					min((uint32_t)QCE_SECTOR_SIZE,
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				QCE_WRITE_REG(
+					min((uint32_t)(QCE_SECTOR_SIZE * 2),
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			default:
+				QCE_WRITE_REG(creq->cryptlen,
+					pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			}
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+
+			/* write encr cntr iv */
+			for (i = 0; i <= 3; i++)
+				QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(i * sizeof(uint32_t)));
+
+			if (creq->mode == QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				for (i = 0; i <= 3; i++)
+					QCE_WRITE_REG(enciv32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_CCM_INT_CNTR0_REG +
+							(i * sizeof(uint32_t)));
+				/* update cntr_iv[3] by one */
+				QCE_WRITE_REG((enciv32[3] + 1),
+							pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(3 * sizeof(uint32_t)));
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				for (i = 0; i < enck_size_in_word; i++)
+					QCE_WRITE_REG(enckey32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_KEY0_REG +
+						(i * sizeof(uint32_t)));
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->mode)  */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* build encr seg cfg */
+	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	/* write encr seg cfg */
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg size */
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
+		QCE_WRITE_REG((creq->cryptlen + creq->authsize),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	} else {
+		QCE_WRITE_REG(creq->cryptlen,
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	}
+
+	/* write encr seg start */
+	QCE_WRITE_REG((coffset & 0xffff),
+			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+
+	/* write encr counter mask */
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
+
+	/* write seg size  */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	if (use_hw_key == false) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure the previous write (setting the GO register) has
+	 * completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f9_setup_direct(struct qce_device *pce_dev,
+				 struct qce_f9_req *req)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t auth_cfg;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		auth_cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		auth_cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	/* set big endian configuration */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous write (setting the CONFIG register) has
+	 * completed before starting to set the other config registers.
+	 * This guarantees the configuration is done with the endianness
+	 * selected in the CONFIG register.
+	 */
+	mb();
+
+	/* write enc_seg_cfg */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write ecn_seg_size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
+					CRYPTO_AUTH_IV4_REG));
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT0_REG));
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT1_REG));
+
+	/* write auth seg cfg */
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg size */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth seg start*/
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* set little endian configuration before go*/
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous write (setting the GO register) has
+	 * completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f8_setup_direct(struct qce_device *pce_dev,
+		struct qce_f8_req *req, bool key_stream_mode,
+		uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
+{
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		encr_cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		encr_cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+	/* set big endian configuration */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write auth seg configuration */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
+			(CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
+	/* write encr seg cfg */
+	if (key_stream_mode)
+		encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg start */
+	QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
+		CRYPTO_ENCR_SEG_START_REG);
+	/* write encr seg size  */
+	QCE_WRITE_REG(cipher_size, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->data_len, pce_dev->iobase +
+		CRYPTO_SEG_SIZE_REG);
+
+	/* write cntr0_iv0 for countC */
+	QCE_WRITE_REG(req->count_c, pce_dev->iobase +
+		CRYPTO_CNTR0_IV0_REG);
+	/* write cntr1_iv1 for nPkts, and bearer */
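+	/*
+	 * A packet count of 1 is programmed as 0; this is assumed to be
+	 * the HW's single-packet encoding.
+	 */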
+	if (npkts == 1)
+		npkts = 0;
+	QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+	/* set little endian configuration before go*/
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous write (setting the GO register) has
+	 * completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
+						[req_info].ce_sps;
+
+	if (pce_dev->no_get_around || pce_dev->support_cmd_dscr == false)
+		return rc;
+
+	rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
+		GET_PHYS_ADDR(pce_sps_data->
+				cmdlistptr.unlock_all_pipes.cmdlist),
+		0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
+	if (rc) {
+		pr_err("sps_xfr_one() fail rc=%d", rc);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete);
+
+static int _aead_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct aead_request *areq;
+	unsigned char mac[SHA256_DIGEST_SIZE];
+	uint32_t ccm_fail_status = 0;
+	uint32_t result_dump_status;
+	int32_t result_status = 0;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct aead_request *) preq_info->areq;
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+
+	if (preq_info->asg)
+		qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+	/* check MAC */
+	memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+
+	/* read status before unlock */
+	if (preq_info->dir == QCE_DECRYPT) {
+		if (pce_dev->no_get_around) {
+			if (pce_dev->no_ccm_mac_status_get_around)
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result->status);
+			else
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result_null->status);
+		} else {
+			ccm_fail_status = readl_relaxed(pce_dev->iobase +
+					CRYPTO_STATUS_REG);
+		}
+	}
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("aead operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+			pce_sps_data->producer_status) {
+		pr_err("aead sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (preq_info->mode == QCE_MODE_CCM) {
+		/*
+		 * MAC_FAILED is not taken from the result dump; use the
+		 * status just read from the device instead.
+		 */
+		if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
+				(ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
+			result_status = -EBADMSG;
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, result_status);
+
+	} else {
+		uint32_t ivsize = 0;
+		struct crypto_aead *aead;
+		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+		aead = crypto_aead_reqtfm(areq);
+		ivsize = crypto_aead_ivsize(aead);
+		memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
+			sizeof(iv));
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, iv, result_status);
+
+	}
+	return 0;
+}
+
+static int _sha_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct ahash_request *areq;
+	unsigned char digest[SHA256_DIGEST_SIZE];
+	uint32_t bytecount32[2];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct ahash_request *) preq_info->areq;
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				DMA_TO_DEVICE);
+	memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	_byte_stream_to_net_words(bytecount32,
+		(unsigned char *)pce_sps_data->result->auth_byte_count,
+					2 * CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, digest, (char *)bytecount32,
+				-ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+
+		pr_err("sha operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status) {
+		pr_err("sha sps operation error. sps status %x\n",
+			pce_sps_data->consumer_status);
+		result_status = -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, digest, (char *)bytecount32, result_status);
+	return 0;
+}
+
+static int _f9_complete(struct qce_device *pce_dev, int req_info)
+{
+	uint32_t mac_i;
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, DMA_TO_DEVICE);
+	_byte_stream_to_net_words(&mac_i,
+		(char *)(&pce_sps_data->result->auth_iv[0]),
+		CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+				| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("f9 operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+				pce_sps_data->producer_status) {
+		pr_err("f9 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, (char *)&mac_i, NULL, result_status);
+
+	return 0;
+}
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct ablkcipher_request *areq;
+	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct ablkcipher_request *) preq_info->areq;
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+			preq_info->dst_nents, DMA_FROM_DEVICE);
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("ablk_cipher operation error. Status %x\n",
+				result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+				pce_sps_data->producer_status) {
+		pr_err("ablk_cipher sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (preq_info->mode == QCE_MODE_ECB) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
+								result_status);
+	} else {
+		if (pce_dev->ce_bam_info.minor_version == 0) {
+			if (preq_info->mode == QCE_MODE_CBC) {
+				if  (preq_info->dir == QCE_DECRYPT)
+					memcpy(iv, (char *)preq_info->dec_iv,
+								sizeof(iv));
+				else
+					memcpy(iv, (unsigned char *)
+						(sg_virt(areq->src) +
+						areq->src->length - 16),
+						sizeof(iv));
+			}
+			if ((preq_info->mode == QCE_MODE_CTR) ||
+				(preq_info->mode == QCE_MODE_XTS)) {
+				uint32_t num_blk = 0;
+				uint32_t cntr_iv3 = 0;
+				unsigned long long cntr_iv64 = 0;
+				unsigned char *b = (unsigned char *)(&cntr_iv3);
+
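+				/*
+				 * Roll the 32-bit big-endian counter in
+				 * iv[12..15] forward by the number of blocks
+				 * consumed, modulo 2^32; e.g. 0xffffffff
+				 * advanced by 2 blocks wraps to 0x00000001.
+				 */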
+				memcpy(iv, areq->info, sizeof(iv));
+				if (preq_info->mode != QCE_MODE_XTS)
+					num_blk = areq->nbytes/16;
+				else
+					num_blk = 1;
+				cntr_iv3 =  ((*(iv + 12) << 24) & 0xff000000) |
+					(((*(iv + 13)) << 16) & 0xff0000) |
+					(((*(iv + 14)) << 8) & 0xff00) |
+					(*(iv + 15) & 0xff);
+				cntr_iv64 =
+					(((unsigned long long)cntr_iv3 &
+					0xFFFFFFFFULL) +
+					(unsigned long long)num_blk) %
+					(unsigned long long)(0x100000000ULL);
+
+				cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
+				*(iv + 15) = (char)(*b);
+				*(iv + 14) = (char)(*(b + 1));
+				*(iv + 13) = (char)(*(b + 2));
+				*(iv + 12) = (char)(*(b + 3));
+			}
+		} else {
+			memcpy(iv,
+				(char *)(pce_sps_data->result->encr_cntr_iv),
+				sizeof(iv));
+		}
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, iv, result_status);
+	}
+	return 0;
+}
+
+static int _f8_complete(struct qce_device *pce_dev, int req_info)
+{
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	uint32_t result_dump_status2;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				preq_info->ota_size, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, (preq_info->phy_ota_dst) ?
+				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
+
+	if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR)))) {
+		pr_err(
+			"f8 oper error. Dump Sta %x Sta2 %x req %d\n",
+			result_dump_status, result_dump_status2, req_info);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status ||
+				pce_sps_data->producer_status) {
+		pr_err("f8 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	pce_sps_data->result->status = 0;
+	pce_sps_data->result->status2 = 0;
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, NULL, NULL, result_status);
+	return 0;
+}
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
+{
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
+							.ce_sps;
+	pce_sps_data->in_transfer.iovec_count = 0;
+	pce_sps_data->out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
+{
+	struct sps_iovec *iovec;
+
+	if (sps_bam_pipe->iovec_count == 0)
+		return;
+	iovec  = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
+	iovec->flags |= flag;
+}
+
+static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
+		struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	uint32_t data_cnt;
+
+	while (len > 0) {
+		if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+			pr_err("Num of descriptors %d exceeds max (%d)",
+				sps_bam_pipe->iovec_count,
+				(uint32_t)QCE_MAX_NUM_DSCR);
+			return -ENOMEM;
+		}
+		if (len > SPS_MAX_PKT_SIZE)
+			data_cnt = SPS_MAX_PKT_SIZE;
+		else
+			data_cnt = len;
+		iovec->size = data_cnt;
+		iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+		iovec->flags = SPS_GET_UPPER_ADDR(paddr);
+		sps_bam_pipe->iovec_count++;
+		iovec++;
+		paddr += data_cnt;
+		len -= data_cnt;
+	}
+	return 0;
+}
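+
+/*
+ * Illustration (hypothetical sizes): with an SPS_MAX_PKT_SIZE of 32KB, a
+ * physically contiguous 100KB buffer handed to _qce_sps_add_data() is
+ * queued as four iovecs of 32KB, 32KB, 32KB and 4KB, each carrying the
+ * upper bits of its chunk's address in the flags field.
+ */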
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg_src));
+		nbytes -= len;
+		addr = sg_dma_address(sg_src);
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			data_cnt = (len > SPS_MAX_PKT_SIZE) ?
+						SPS_MAX_PKT_SIZE : len;
+			iovec->size = data_cnt;
+			iovec->addr = SPS_GET_LOWER_ADDR(addr);
+			iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		sg_src = sg_next(sg_src);
+	}
+	return 0;
+}
+
+static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+	unsigned int res_within_sg;
+
+	if (!sg_src)
+		return -ENOENT;
+	res_within_sg = sg_dma_len(sg_src);
+
+	while (off > 0) {
+		if (!sg_src) {
+			pr_err("broken sg list off %d nbytes %d\n",
+				off, nbytes);
+			return -ENOENT;
+		}
+		len = sg_dma_len(sg_src);
+		if (off < len) {
+			res_within_sg = len - off;
+			break;
+		}
+		off -= len;
+		sg_src = sg_next(sg_src);
+		if (sg_src)
+			res_within_sg = sg_dma_len(sg_src);
+	}
+	while (nbytes > 0 && sg_src) {
+		len = min(nbytes, res_within_sg);
+		nbytes -= len;
+		addr = sg_dma_address(sg_src) + off;
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			data_cnt = (len > SPS_MAX_PKT_SIZE) ?
+						SPS_MAX_PKT_SIZE : len;
+			iovec->size = data_cnt;
+			iovec->addr = SPS_GET_LOWER_ADDR(addr);
+			iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		if (nbytes) {
+			sg_src = sg_next(sg_src);
+			if (!sg_src) {
+				pr_err("more data bytes %d\n", nbytes);
+				return -ENOMEM;
+			}
+			res_within_sg = sg_dma_len(sg_src);
+			off = 0;
+		}
+	}
+	return 0;
+}
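+
+/*
+ * Example (hypothetical layout) for _qce_sps_add_sg_data_off(): given an
+ * sg list of two 4KB entries and off = 6KB, the skip loop consumes the
+ * first entry plus 2KB of the second, leaving res_within_sg = 2KB, and
+ * queueing then starts from that point.
+ */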
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+				struct qce_cmdlist_info *cmdptr,
+				struct sps_transfer *sps_bam_pipe)
+{
+	dma_addr_t  paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	iovec->size = cmdptr->size;
+	iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+	iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
+	sps_bam_pipe->iovec_count++;
+	if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
+		pr_err("Num of descriptors %d exceeds max (%d)",
+			sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	pce_sps_data->out_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
+	pce_sps_data->in_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
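+	/*
+	 * The BAM "user" cookie packs CRYPTO_REQ_USER_PAT into the upper
+	 * bits and the request index into the lower bits (illustratively,
+	 * request 3 becomes CRYPTO_REQ_USER_PAT | 3); the producer callback
+	 * verifies the pattern before trusting the index.
+	 */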
+	_qce_dump_descr_fifos_dbg(pce_dev, req_info);
+
+	if (pce_sps_data->in_transfer.iovec_count) {
+		rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
+					  &pce_sps_data->in_transfer);
+		if (rc) {
+			pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
+				(uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
+				rc);
+			goto ret;
+		}
+	}
+	rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+			(uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
+ret:
+	if (rc)
+		_qce_dump_descr_fifos(pce_dev, req_info);
+	return rc;
+}
+
+/**
+ * Allocate and connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and connects it to a
+ * memory endpoint by calling the appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with the SPS driver.
+ *
+ * This function should only be called once, typically during driver
+ * probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ * @is_producer - 1 means producer endpoint
+ *		  0 means consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info = &ep->connect;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_info = sps_alloc_endpoint();
+	if (!sps_pipe_info) {
+		pr_err("sps_alloc_endpoint() failed!!! is_producer=%d",
+			   is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Now save the sps pipe handle */
+	ep->pipe = sps_pipe_info;
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For a CE producer transfer, the source is the CE
+		 * peripheral whereas the destination is system memory.
+		 */
+		sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+		/* Producer pipe will handle this connection */
+		sps_connect_info->mode = SPS_MODE_SRC;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+	} else {
+		/*
+		 * For a CE consumer transfer, the source is system memory
+		 * whereas the destination is the CE peripheral.
+		 */
+		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+		sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->mode = SPS_MODE_DEST;
+		sps_connect_info->options = SPS_O_AUTO_ENABLE;
+	}
+
+	/* Producer pipe index */
+	sps_connect_info->src_pipe_index =
+				pce_dev->ce_bam_info.src_pipe_index;
+	/* Consumer pipe index */
+	sps_connect_info->dest_pipe_index =
+				pce_dev->ce_bam_info.dest_pipe_index;
+	/* Set pipe group */
+	sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
+	sps_connect_info->event_thresh = 0x10;
+	/*
+	 * Max number of scatter/gather buffers that can be passed by the
+	 * block layer = 32 (NR_SG). Each BAM descriptor needs 64 bits
+	 * (8 bytes), and one BAM descriptor is required per buffer
+	 * transfer, so 256 (32 * 8) bytes of descriptor FIFO would be
+	 * enough. A HW limitation requires at least one extra descriptor's
+	 * worth of memory (256 + 8 bytes); rounding up to a power of 2
+	 * gives 512 bytes. The allocation below sizes the FIFO for all
+	 * outstanding requests and caps it at MAX_SPS_DESC_FIFO_SIZE.
+	 */
+	sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
+					sizeof(struct sps_iovec);
+	if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
+		sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
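+	/*
+	 * Worked example with purely hypothetical values: 20 descriptors
+	 * per request * 17 requests * 8 bytes per iovec = 2720 bytes
+	 * requested, which the check above caps at MAX_SPS_DESC_FIFO_SIZE
+	 * when it exceeds that limit.
+	 */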
+	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+					sps_connect_info->desc.size,
+					&sps_connect_info->desc.phys_base,
+					GFP_KERNEL);
+	if (sps_connect_info->desc.base == NULL) {
+		rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto get_config_err;
+	}
+
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto sps_connect_err;
+	}
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+	sps_event->xfer_done = NULL;
+	sps_event->user = (void *)pce_dev;
+	if (is_producer) {
+		sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
+		sps_event->callback = _sps_producer_callback;
+		rc = sps_register_event(ep->pipe, sps_event);
+		if (rc) {
+			pr_err("Producer callback registration failed rc=%d\n",
+									rc);
+			goto sps_connect_err;
+		}
+	} else {
+		sps_event->options = SPS_O_EOT;
+		sps_event->callback = NULL;
+	}
+
+	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = %pa\n",
+		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+		(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
+	goto out;
+
+sps_connect_err:
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+get_config_err:
+	sps_free_endpoint(sps_pipe_info);
+out:
+	return rc;
+}
+
+/**
+ * Disconnect and deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates the endpoint
+ * context.
+ *
+ * This function should only be called once, typically during driver
+ * remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_info = ep->pipe;
+	struct sps_connect *sps_connect_info = &ep->connect;
+
+	sps_disconnect(sps_pipe_info);
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+	sps_free_endpoint(sps_pipe_info);
+}
+
+static void qce_sps_release_bam(struct qce_device *pce_dev)
+{
+	struct bam_registration_info *pbam;
+
+	mutex_lock(&bam_register_lock);
+	pbam = pce_dev->pbam;
+	if (pbam == NULL)
+		goto ret;
+
+	pbam->cnt--;
+	if (pbam->cnt > 0)
+		goto ret;
+
+	if (pce_dev->ce_bam_info.bam_handle) {
+		sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
+
+		pr_debug("deregister bam handle 0x%lx\n",
+					pce_dev->ce_bam_info.bam_handle);
+		pce_dev->ce_bam_info.bam_handle = 0;
+	}
+	iounmap(pbam->bam_iobase);
+	pr_debug("delete bam 0x%x\n", pbam->bam_mem);
+	list_del(&pbam->qlist);
+	kfree(pbam);
+
+ret:
+	pce_dev->pbam = NULL;
+	mutex_unlock(&bam_register_lock);
+}
+
+static int qce_sps_get_bam(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	struct bam_registration_info *pbam = NULL;
+	struct bam_registration_info *p;
+	uint32_t bam_cfg = 0;
+
+	mutex_lock(&bam_register_lock);
+
+	list_for_each_entry(p, &qce50_bam_list, qlist) {
+		if (p->bam_mem == pce_dev->bam_mem) {
+			pbam = p;  /* found */
+			break;
+		}
+	}
+
+	if (pbam) {
+		pr_debug("found bam 0x%x\n", pbam->bam_mem);
+		pbam->cnt++;
+		pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+		pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+		pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+		pce_dev->pbam = pbam;
+		pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+		goto ret;
+	}
+
+	pbam = kzalloc(sizeof(struct  bam_registration_info), GFP_KERNEL);
+	if (!pbam) {
+		rc = -ENOMEM;
+		goto ret;
+	}
+	pbam->cnt = 1;
+	pbam->bam_mem = pce_dev->bam_mem;
+	pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
+					pce_dev->bam_mem_size);
+	if (!pbam->bam_iobase) {
+		kfree(pbam);
+		rc = -ENOMEM;
+		pr_err("Can not map BAM io memory\n");
+		goto ret;
+	}
+	pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+	pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+	pbam->handle = 0;
+	pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
+	bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
+					CRYPTO_BAM_CNFG_BITS_REG);
+	pbam->support_cmd_dscr =  (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
+					true : false;
+	if (pbam->support_cmd_dscr == false) {
+		pr_info("qce50 does not support command descriptors. bam_cfg=0x%x\n",
+							bam_cfg);
+		pce_dev->no_get_around = false;
+	}
+	pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+
+	bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
+	bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
+
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfers. It is ignored for BAM-to-system mode transfers.
+	 */
+	bam.event_threshold = 0x10;	/* Pipe event threshold */
+	/*
+	 * This threshold controls when the BAM publishes the descriptor
+	 * size on the sideband interface. The SPS HW will only be used
+	 * when the data transfer size is > 64 bytes.
+	 */
+	bam.summing_threshold = 64;
+	/* SPS driver will handle the crypto BAM IRQ */
+	bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
+	/*
+	 * Set flag to indicate BAM global device control is managed
+	 * remotely.
+	 */
+	if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+	else
+		bam.manage = SPS_BAM_MGR_LOCAL;
+
+	bam.ee = pce_dev->ce_bam_info.bam_ee;
+	bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
+	bam.options |= SPS_BAM_CACHED_WP;
+	pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
+	pr_debug("bam virtual base=0x%p\n", bam.virt_addr);
+
+	/* Register CE Peripheral BAM device to SPS driver */
+	rc = sps_register_bam_device(&bam, &pbam->handle);
+	if (rc) {
+		pr_err("sps_register_bam_device() failed! err=%d", rc);
+		rc = -EIO;
+		iounmap(pbam->bam_iobase);
+		kfree(pbam);
+		goto ret;
+	}
+
+	pce_dev->pbam = pbam;
+	list_add_tail(&pbam->qlist, &qce50_bam_list);
+	pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+
+ret:
+	mutex_unlock(&bam_register_lock);
+
+	return rc;
+}
+
+/**
+ * Initialize the SPS HW connected to the CE core
+ *
+ * This function registers the BAM HW resources with the SPS driver and
+ * then initializes the two SPS endpoints.
+ *
+ * This function should only be called once, typically during driver
+ * probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	rc = qce_sps_get_bam(pce_dev);
+	if (rc)
+		return rc;
+	pr_debug("BAM device registered. bam_handle=0x%lx\n",
+		pce_dev->ce_bam_info.bam_handle);
+
+	rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.producer, true);
+	if (rc)
+		goto sps_connect_producer_err;
+	rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.consumer, false);
+	if (rc)
+		goto sps_connect_consumer_err;
+
+	pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
+		(unsigned long long)pce_dev->ce_bam_info.bam_mem,
+		(unsigned int)pce_dev->ce_bam_info.bam_irq);
+	return rc;
+
+sps_connect_consumer_err:
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+sps_connect_producer_err:
+	qce_sps_release_bam(pce_dev);
+	return rc;
+}
+
+static inline int qce_alloc_req_info(struct qce_device *pce_dev)
+{
+	int i;
+	int request_index = pce_dev->ce_request_index;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		request_index++;
+		if (request_index >= MAX_QCE_BAM_REQ)
+			request_index = 0;
+		if (xchg(&pce_dev->ce_request_info[request_index].
+						in_use, true) == false) {
+			pce_dev->ce_request_index = request_index;
+			return request_index;
+		}
+	}
+	pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
+			pce_dev->dev_no, atomic_read(
+					&pce_dev->no_of_queued_req));
+	return -EBUSY;
+}
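+
+/*
+ * Slot allocation is lock-free: xchg() atomically claims in_use, so two
+ * contexts racing for the same slot cannot both win it. A minimal usage
+ * sketch (hypothetical caller):
+ *
+ *	req_info = qce_alloc_req_info(pce_dev);
+ *	if (req_info < 0)
+ *		return req_info;	(-EBUSY: all slots busy)
+ *	... program and submit the request ...
+ *	qce_free_req_info(pce_dev, req_info, true);
+ */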
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete)
+{
+	pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
+	if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+		if (req_info < MAX_QCE_BAM_REQ && is_complete)
+			atomic_dec(&pce_dev->no_of_queued_req);
+	} else
+		pr_warn("request info %d free already\n", req_info);
+}
+
+static void print_notify_debug(struct sps_event_notify *notify)
+{
+	phys_addr_t addr =
+		DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
+				  notify->data.transfer.iovec.addr);
+	pr_debug("sps ev_id=%d, addr=0x%pa, size=0x%x, flags=0x%x user=0x%p\n",
+			notify->event_id, &addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags,
+			notify->data.transfer.user);
+}
+
+static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
+{
+	struct ce_request_info *preq_info;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+
+	switch (preq_info->xfer_type) {
+	case QCE_XFER_CIPHERING:
+		_ablk_cipher_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_HASHING:
+		_sha_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_AEAD:
+		_aead_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F8:
+		_f8_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F9:
+		_f9_complete(pce_dev, req_info);
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, true);
+		break;
+	}
+}
+
+static void qce_multireq_timeout(unsigned long data)
+{
+	struct qce_device *pce_dev = (struct qce_device *)data;
+	int ret = 0;
+	int last_seq;
+	unsigned long flags;
+
+	last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
+	if (last_seq == 0 ||
+		last_seq != atomic_read(&pce_dev->last_intr_seq)) {
+		atomic_set(&pce_dev->last_intr_seq, last_seq);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+	/* last bunch mode command time out */
+
+	/*
+	 * From here until the dummy request finishes the SPS transfer and
+	 * the owner is set back to none, interrupts are disabled so this
+	 * path cannot be preempted or interrupted. Otherwise, if a BAM
+	 * interrupt occurred in between and the completion callback were
+	 * called from BAM, a new request could be issued by the client
+	 * driver and a deadlock could occur.
+	 */
+	local_irq_save(flags);
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
+							!= QCE_OWNER_NONE) {
+		local_irq_restore(flags);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+
+	ret = qce_dummy_req(pce_dev);
+	if (ret)
+		pr_warn("pcedev %d: Failed to insert dummy req\n",
+				pce_dev->dev_no);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	local_irq_restore(flags);
+
+	del_timer(&(pce_dev->timer));
+	pce_dev->qce_stats.no_of_timeouts++;
+	pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
+}
+
+void qce_get_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (!_qce50_disp_stats)
+		return;
+	pr_info("Engine %d timeouts occurred %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_timeouts);
+	pr_info("Engine %d dummy request inserted %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_dummy_reqs);
+	if (pce_dev->mode)
+		pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
+	else
+		pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
+	pr_info("Engine %d outstanding request %d\n", pce_dev->dev_no,
+			atomic_read(&pce_dev->no_of_queued_req));
+}
+EXPORT_SYMBOL(qce_get_driver_stats);
+
+void qce_clear_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	pce_dev->qce_stats.no_of_timeouts = 0;
+	pce_dev->qce_stats.no_of_dummy_reqs = 0;
+}
+EXPORT_SYMBOL(qce_clear_driver_stats);
+
+static void _sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)notify->user;
+	int rc = 0;
+	unsigned int req_info;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	print_notify_debug(notify);
+
+	req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
+	if ((req_info & 0xffff0000) != CRYPTO_REQ_USER_PAT) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	req_info = req_info & 0x00ff;
+	if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+
+	pce_sps_data = &preq_info->ce_sps;
+	if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
+		preq_info->xfer_type == QCE_XFER_AEAD) &&
+			pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		pce_sps_data->out_transfer.iovec_count = 0;
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+		_qce_set_flag(&pce_sps_data->out_transfer,
+				SPS_IOVEC_FLAG_INT);
+		rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+					  &pce_sps_data->out_transfer);
+		if (rc) {
+			pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+				(uintptr_t)pce_dev->ce_bam_info.producer.pipe,
+				rc);
+		}
+		return;
+	}
+
+	_qce_req_complete(pce_dev, req_info);
+}
+
+/**
+ * De-initialize SPS HW connected with CE core
+ *
+ * This function deinitializes the SPS endpoints and then
+ * deregisters the BAM resources from the SPS driver.
+ *
+ * It should be called only once, typically during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+	qce_sps_release_bam(pce_dev);
+}
+
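+/*
+ * Append one SPS command element writing 'data' to the CE register at
+ * offset 'addr' and advance *cmd_ptr past it.  When 'populate' is not
+ * NULL it is set to the element just written, so callers can patch
+ * the value per request later without rebuilding the whole list.
+ */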
+static void qce_add_cmd_element(struct qce_device *pdev,
+			struct sps_command_element **cmd_ptr, u32 addr,
+			u32 data, struct sps_command_element **populate)
+{
+	(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+	(*cmd_ptr)->command = 0;
+	(*cmd_ptr)->data = data;
+	(*cmd_ptr)->mask = 0xFFFFFFFF;
+	(*cmd_ptr)->reserved = 0;
+	if (populate != NULL)
+		*populate = *cmd_ptr;
+	(*cmd_ptr)++;
+}
+
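+/*
+ * The _setup_*_cmdlistptrs() helpers below all follow one pattern:
+ * carve a burst-size aligned chunk out of *pvaddr, record its start
+ * in the per-request cmdlistptr table, emit the register writes
+ * (clear status, big-endian config, segment setup, keys/IVs,
+ * little-endian config, GO), record the list size, and advance
+ * *pvaddr for the next list.
+ */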
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t xts_key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to AES cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (mode) {
+	case QCE_MODE_CBC:
+	case QCE_MODE_CTR:
+		if (key_128) {
+			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+		break;
+	case QCE_MODE_ECB:
+		if (key_128) {
+			cmdlistptr->cipher_aes_128_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
+			iv_reg = 0;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
+			iv_reg = 0;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+		break;
+	case QCE_MODE_XTS:
+		if (key_128) {
+			cmdlistptr->cipher_aes_128_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 4;
+		} else {
+			cmdlistptr->cipher_aes_256_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 8;
+		}
+		break;
+	default:
+		pr_err("Unknown mode of operation %d received, exiting now\n",
+			mode);
+		return -EINVAL;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (xts_key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+					0, &pcl_info->encr_xts_key);
+		for (i = 1; i < xts_key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_XTS_KEY0_REG +
+						i * sizeof(uint32_t)), 0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
+					&pcl_info->encr_xts_du_size);
+	}
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+	/* Add dummy to align size to a burst-size multiple */
+	if (mode == QCE_MODE_XTS) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_size);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+		bool mode_cbc)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case CIPHER_ALG_DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			iv_reg = 2;
+			key_reg = 2;
+		} else {
+			cmdlistptr->cipher_des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_des_ecb;
+			iv_reg = 0;
+			key_reg = 2;
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_3des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			iv_reg = 2;
+			key_reg = 6;
+		} else {
+			cmdlistptr->cipher_3des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
+			iv_reg = 0;
+			key_reg = 6;
+		}
+		break;
+	default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->cipher_null);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
+			pdev->ce_bam_info.ce_burst_size, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+			pdev->reg.encr_cfg_aes_ecb_128, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+			 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t key_reg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_HASH_SHA1:
+		cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1);
+
+		auth_cfg = pdev->reg.auth_cfg_sha1;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+		break;
+	case QCE_HASH_SHA256:
+		cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256);
+
+		auth_cfg = pdev->reg.auth_cfg_sha256;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
+		key_reg = 16;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
+		key_reg = 16;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		break;
+	case QCE_HASH_AES_CMAC:
+		if (key_128) {
+			cmdlistptr->auth_aes_128_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_128;
+			key_reg = 4;
+		} else {
+			cmdlistptr->auth_aes_256_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_256;
+			key_reg = 8;
+		}
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+		break;
+	default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	if (alg == QCE_HASH_AES_CMAC) {
+		/* reset auth iv, bytecount and key registers */
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, NULL);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, &pcl_info->auth_bytecount);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	if (key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+		for (i = 1; i < key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+				int cri_index,
+				unsigned char **pvaddr,
+				uint32_t alg,
+				uint32_t mode,
+				uint32_t key_size,
+				bool     sha1)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	uint32_t key_reg;
+	uint32_t iv_reg;
+	uint32_t i;
+	uint32_t enciv_in_word;
+	uint32_t encr_cfg;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	switch (alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha1_cbc_des);
+			} else {
+				cmdlistptr->aead_hmac_sha256_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha256_cbc_des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_3DES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha1_cbc_3des);
+			} else {
+				cmdlistptr->aead_hmac_sha256_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha256_cbc_3des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (key_size == AES128_KEY_SIZE) {
+				if (sha1) {
+					cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128);
+				} else {
+					cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128);
+				}
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+			} else if (key_size == AES256_KEY_SIZE) {
+				if (sha1) {
+					cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256);
+				} else {
+					cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256);
+				}
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	key_reg = key_size/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+			&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	if (mode != QCE_MODE_ECB) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+			&pcl_info->encr_cntr_iv);
+		for (i = 1; i < enciv_in_word; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+
+	if (sha1)
+		iv_reg = 5;
+	else
+		iv_reg = 8;
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+				&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+				0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+			 &pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+			&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+			&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			&pcl_info->encr_seg_start);
+
+	if (sha1)
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+				pdev->reg.auth_cfg_aead_sha1_hmac,
+				&pcl_info->auth_seg_cfg);
+	else
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+				pdev->reg.auth_cfg_aead_sha256_hmac,
+				&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+			&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+			&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
+static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
+				unsigned char **pvaddr, bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t key_reg = 0;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to aead operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	if (key_128) {
+		cmdlistptr->aead_aes_128_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
+		key_reg = 4;
+	} else {
+		cmdlistptr->aead_aes_256_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
+
+		key_reg = 8;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+									NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+					encr_cfg, &pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+	/* reset auth iv, bytecount and key registers */
+	for (i = 0; i < 8; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+					0, NULL);
+	for (i = 0; i < 16; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set auth key */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+							&pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set NONCE info */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+						&pcl_info->auth_nonce_info);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_INFO_NONCE0_REG +
+				i * sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+						&pcl_info->encr_ccm_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 4;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to f8 cipher algorithm defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_kasumi);
+		encr_cfg = pdev->reg.encr_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_snow3g);
+		encr_cfg = pdev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						 &pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_kasumi);
+		auth_cfg = pdev->reg.auth_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_snow3g);
+		auth_cfg = pdev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	iv_reg = 5;
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
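+/*
+ * Minimal command list issued with SPS_IOVEC_FLAG_UNLOCK: a single
+ * write resetting CRYPTO_CONFIG_REG, releasing the pipe lock taken
+ * when a transfer was started with SPS_IOVEC_FLAG_LOCK.
+ */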
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+	/*
+	 * Designate chunks of the allocated memory to command list
+	 * to unlock pipes.
+	 */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
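+/*
+ * Build the complete set of command lists for one request slot:
+ * every supported AES/DES/3DES mode and key size, SHA1/SHA256 (plain
+ * and HMAC), AES CMAC, the AEAD and AEAD-CCM combinations, the null
+ * cipher, the F8/F9 OTA algorithms, and the unlock-pipes list.
+ */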
+static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
+					unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	ce_vaddr =
+		(struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
+					pdev->ce_bam_info.ce_burst_size);
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								false);
+
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								false);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								true);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								false);
+
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, false);
+
+	_setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	return 0;
+}
+
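+/*
+ * Carve the preallocated coherent and iovec memory into per-request
+ * regions: input/output iovec arrays, the command lists (when command
+ * descriptors are supported), two result-dump areas, and an ignore
+ * buffer per slot.  Overrunning either pool is fatal.
+ */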
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+	unsigned char *vaddr;
+	int i;
+	unsigned char *iovec_vaddr;
+	int iovec_memsize;
+
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+					pce_dev->ce_bam_info.ce_burst_size);
+	iovec_vaddr = pce_dev->iovec_vmem;
+	iovec_memsize = pce_dev->iovec_memsize;
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
+		/* Allow for 256 descriptor (cmd and data) entries per pipe */
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
+			virt_to_phys(pce_dev->ce_request_info[i].
+				ce_sps.in_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
+			virt_to_phys(pce_dev->ce_request_info[i].
+				ce_sps.out_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		if (pce_dev->support_cmd_dscr)
+			qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
+		vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+				pce_dev->ce_bam_info.ce_burst_size);
+		pce_dev->ce_request_info[i].ce_sps.result_dump =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result_null =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
+				(uintptr_t)vaddr;
+		vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
+	}
+	if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
+							iovec_memsize < 0)
+		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
+				 pce_dev->memsize, (uintptr_t)vaddr -
+				(uintptr_t)pce_dev->coh_vmem);
+	return 0;
+}
+
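+/*
+ * Precompute the CRYPTO_CONFIG and per-algorithm encryption and
+ * authentication segment configuration values once at init time, so
+ * the request path only has to patch sizes, keys and IVs into the
+ * command lists.
+ */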
+static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
+{
+	uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
+
+	pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
+		BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
+		BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
+		(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+	pce_dev->reg.crypto_cfg_le =
+		(pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
+
+	/* Initialize encr_cfg register for AES alg */
+	pce_dev->reg.encr_cfg_aes_cbc_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_cbc_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ccm_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	pce_dev->reg.encr_cfg_aes_ccm_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	/* Initialize encr_cfg register for DES alg */
+	pce_dev->reg.encr_cfg_des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	/* Initialize encr_cfg register for kasumi/snow3g alg */
+	pce_dev->reg.encr_cfg_kasumi =
+		(CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
+
+	pce_dev->reg.encr_cfg_snow3g =
+		(CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
+
+	/* Initialize auth_cfg register for CMAC alg */
+	pce_dev->reg.auth_cfg_cmac_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+
+	pce_dev->reg.auth_cfg_cmac_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
+
+	/* Initialize auth_cfg register for HMAC alg */
+	pce_dev->reg.auth_cfg_hmac_sha1 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_hmac_sha256 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for SHA1/256 alg */
+	pce_dev->reg.auth_cfg_sha1 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_sha256 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for AEAD alg */
+	pce_dev->reg.auth_cfg_aead_sha1_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+	pce_dev->reg.auth_cfg_aead_sha256_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+	pce_dev->reg.auth_cfg_aes_ccm_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	pce_dev->reg.auth_cfg_aes_ccm_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	/* Initialize auth_cfg register for kasumi/snow3g */
+	pce_dev->reg.auth_cfg_kasumi =
+			(CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+	pce_dev->reg.auth_cfg_snow3g =
+			(CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+	return 0;
+}
+
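+/*
+ * CCM decrypt workaround: on targets that need the get-around but
+ * lack the CCM MAC status get-around, a null-cipher command plus one
+ * burst-size pass-through block is appended after the real payload on
+ * the input side, with matching output descriptors dumping into
+ * result_dump_null, presumably so the MAC status of the real request
+ * can be read back reliably.
+ */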
+static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct qce_cmdlist_info *cmdlistinfo;
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
+		_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+				&pce_sps_data->in_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+	}
+}
+
+static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
+
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->out_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
+			CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
+	}
+}
+
+/* QCE_DUMMY_REQ */
+static void qce_dummy_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	if (!cookie)
+		pr_err("invalid cookie\n");
+}
+
+static int qce_dummy_req(struct qce_device *pce_dev)
+{
+	int ret = 0;
+
+	if (!(xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].
+				in_use, true) == false))
+		return -EBUSY;
+	ret = qce_process_sha_req(pce_dev, NULL);
+	pce_dev->qce_stats.no_of_dummy_reqs++;
+	return ret;
+}
+
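+/*
+ * Decide whether this request should raise an interrupt.  In
+ * interrupt mode every request gets SPS_IOVEC_FLAG_INT; once
+ * MAX_BUNCH_MODE_REQ requests are outstanding the device switches to
+ * bunch mode, where interrupts are requested only on a cadence
+ * derived from the request length, with qce_multireq_timeout() as
+ * the backstop for the final bunch.
+ */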
+static int select_mode(struct qce_device *pce_dev,
+		struct ce_request_info *preq_info)
+{
+	struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
+	unsigned int no_of_queued_req;
+	unsigned int cadence;
+
+	if (!pce_dev->no_get_around) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		return 0;
+	}
+
+	/* claim ownership of the device */
+again:
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
+							!= QCE_OWNER_NONE) {
+		ndelay(40);
+		goto again;
+	}
+	no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
+	if (pce_dev->mode == IN_INTERRUPT_MODE) {
+		if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
+			pce_dev->mode = IN_BUNCH_MODE;
+			pr_debug("pcedev %d mode switch to BUNCH\n",
+					pce_dev->dev_no);
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 1);
+			atomic_set(&pce_dev->last_intr_seq, 1);
+			mod_timer(&(pce_dev->timer),
+					(jiffies + DELAY_IN_JIFFIES));
+		} else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+		}
+	} else {
+		pce_dev->intr_cadence++;
+		cadence = (preq_info->req_len >> 7) + 1;
+		if (cadence > SET_INTR_AT_REQ)
+			cadence = SET_INTR_AT_REQ;
+		if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
+					== cadence) && pce_dev->cadence_flag))
+			atomic_inc(&pce_dev->bunch_cmd_seq);
+		else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 0);
+			atomic_set(&pce_dev->last_intr_seq, 0);
+			pce_dev->cadence_flag = ~pce_dev->cadence_flag;
+		}
+	}
+
+	return 0;
+}
+
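+/*
+ * AES CCM request.  On encrypt the MAC is appended to the ciphertext
+ * and padded out to a burst boundary; on decrypt the trailing MAC
+ * bytes are diverted into the ignore buffer.  Associated data is
+ * consumed on the input side and passed through to the ignore buffer
+ * on the output side.
+ */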
+static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	uint32_t totallen_in, out_len;
+	uint32_t hw_pad_out = 0;
+	int ce_burst_size;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
+	totallen_in = areq->cryptlen + q_req->assoclen;
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		out_len = areq->cryptlen + authsize;
+		hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		out_len = q_req->cryptlen;
+		hw_pad_out = authsize;
+	}
+
+	/*
+	 * Crypto 5.0 has a burst-size alignment requirement for data
+	 * descriptors, so the agent above (qcrypto) prepares the source
+	 * scatter list with memory starting with the associated data,
+	 * followed by the data stream to be ciphered.
+	 * The destination scatter list points to the same data area as
+	 * the source.
+	 */
+	if (pce_dev->ce_bam_info.minor_version == 0)
+		preq_info->src_nents = count_sg(areq->src, totallen_in);
+	else
+		preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
+							areq->assoclen);
+
+	if (q_req->assoclen) {
+		preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
+
+		/* formatted associated data input */
+		qce_dma_map_sg(pce_dev->pdev, q_req->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+		preq_info->asg = q_req->asg;
+	} else {
+		preq_info->assoc_nents = 0;
+		preq_info->asg = NULL;
+	}
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher + mac output  for encryption    */
+	if (areq->src != areq->dst) {
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			/*
+			 * The destination scatter list points to the same
+			 * data area as the source.
+			 * Note, the associated data will be passed through
+			 * at the beginning of the destination area.
+			 */
+			preq_info->dst_nents = count_sg(areq->dst,
+						out_len + areq->assoclen);
+		else
+			preq_info->dst_nents = count_sg(areq->dst, out_len +
+						areq->assoclen);
+
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
+								 q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						q_req->alg, q_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+					q_req->assoclen, cmdlistinfo);
+	} else {
+		/* set up crypto device */
+		rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
+					q_req->assoclen);
+	}
+
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = q_req->mode;
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen_in;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		goto bad;
+	} else {
+		if (q_req->assoclen && (_qce_sps_add_sg_data(
+			pce_dev, q_req->asg, q_req->assoclen,
+					 &pce_sps_data->in_transfer)))
+			goto bad;
+		if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
+					areq->assoclen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		_qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
+
+		if (pce_dev->no_get_around)
+			_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+
+		/* Pass through to ignore associated data */
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				q_req->assoclen,
+				&pce_sps_data->out_transfer))
+			goto bad;
+		if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
+					areq->assoclen,
+					&pce_sps_data->out_transfer))
+			goto bad;
+		/* Pass through to ignore hw_pad (padding of the MAC data) */
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				hw_pad_out, &pce_sps_data->out_transfer))
+			goto bad;
+		if (pce_dev->no_get_around ||
+				totallen_in <= SPS_MAX_PKT_SIZE) {
+			if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+
+		_qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
+
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->assoc_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
+				preq_info->assoc_nents, DMA_TO_DEVICE);
+	}
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+
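+/*
+ * PM helpers exported through qce_pm_table below: suspend tears down
+ * the consumer and producer SPS pipe connections (with the clocks
+ * enabled around the calls), and resume reconnects both pipes and
+ * re-registers the producer event callback.
+ */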
+static int _qce_suspend(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	qce_disable_clk(pce_dev);
+	return 0;
+}
+
+static int _qce_resume(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info;
+	int rc;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+		return rc;
+	}
+	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc)
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+
+	rc = sps_register_event(sps_pipe_info,
+					&pce_dev->ce_bam_info.producer.event);
+	if (rc)
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+
+	qce_disable_clk(pce_dev);
+	return rc;
+}
+
+struct qce_pm_table qce_pm_table  = {_qce_suspend, _qce_resume};
+EXPORT_SYMBOL(qce_pm_table);
+
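+/*
+ * AEAD entry point. CCM is routed to the dedicated
+ * _qce_aead_ccm_req() handler, since its associated data must be
+ * formatted and passed through separately; the other AEAD modes
+ * share this path.
+ */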
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct aead_request *areq;
+	uint32_t authsize;
+	struct crypto_aead *aead;
+	uint32_t ivsize;
+	uint32_t totallen;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	if (q_req->mode == QCE_MODE_CCM)
+		return _qce_aead_ccm_req(handle, q_req);
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	areq = (struct aead_request *) q_req->areq;
+	aead = crypto_aead_reqtfm(areq);
+	ivsize = crypto_aead_ivsize(aead);
+	q_req->ivsize = ivsize;
+	authsize = q_req->authsize;
+	if (q_req->dir == QCE_ENCRYPT)
+		q_req->cryptlen = areq->cryptlen;
+	else
+		q_req->cryptlen = areq->cryptlen - authsize;
+
+	if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
+		pr_err("Integer overflow on total aead req length.\n");
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	totallen = q_req->cryptlen + areq->assoclen;
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
+							req_info, q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
+				q_req->alg, q_req->mode, q_req->encklen,
+					q_req->authsize);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_aead(pce_dev, q_req, totallen,
+					areq->assoclen, cmdlistinfo);
+		if (rc < 0) {
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * For crypto 5.0, which has a burst size alignment requirement
+	 * for data descriptors, the agent above (qcrypto) prepares the
+	 * src scatter list with memory starting with the associated
+	 * data, followed by the iv, and then the data stream to be
+	 * ciphered.
+	 */
+	preq_info->src_nents = count_sg(areq->src, totallen);
+
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, totallen);
+
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+	preq_info->asg = NULL;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr) {
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	} else {
+		rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
+					areq->assoclen);
+		if (rc)
+			goto bad;
+	}
+
+	preq_info->mode = q_req->mode;
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+				&pce_sps_data->out_transfer))
+			goto bad;
+		if (totallen > SPS_MAX_PKT_SIZE) {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		} else {
+			if (_qce_sps_add_data(GET_PHYS_ADDR(
+					pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					&pce_sps_data->out_transfer))
+				goto bad;
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		}
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		if (pce_dev->no_get_around)
+			_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+
+		if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+					&pce_sps_data->out_transfer))
+			goto bad;
+
+		if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
+			if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->src_nents)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	if (areq->src != areq->dst)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
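+/*
+ * Plain (non-AEAD) block cipher path. Note that for minor_version 0
+ * hardware doing CBC decryption, the trailing cipher block of the
+ * source is saved as dec_iv before the transform runs, for use as the
+ * next IV.
+ */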
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = 0;
+	preq_info->dst_nents = 0;
+
+	/* cipher input */
+	preq_info->src_nents = count_sg(areq->src, areq->nbytes);
+
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, areq->nbytes);
+		qce_dma_map_sg(pce_dev->pdev, areq->dst,
+				preq_info->dst_nents, DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+	preq_info->dir = c_req->dir;
+	if ((pce_dev->ce_bam_info.minor_version == 0) &&
+			(preq_info->dir == QCE_DECRYPT) &&
+			(c_req->mode == QCE_MODE_CBC)) {
+		memcpy(preq_info->dec_iv, (unsigned char *)
+			sg_virt(areq->src) + areq->src->length - 16,
+			NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
+	}
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
+							req_info, c_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						c_req->alg, c_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
+							cmdlistinfo);
+	} else {
+		rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = c_req->mode;
+
+	/* setup for client callback, and issue command to BAM */
+	preq_info->areq = areq;
+	preq_info->qce_cb = c_req->qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_CIPHERING;
+	preq_info->req_len = areq->nbytes;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+					&pce_sps_data->in_transfer))
+		goto bad;
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	if (pce_dev->no_get_around)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+
+	if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+					&pce_sps_data->out_transfer))
+		goto bad;
+	if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+				CRYPTO_RESULT_DUMP_SIZE,
+				&pce_sps_data->out_transfer))
+			goto bad;
+	} else {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+	}
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+
+	return 0;
+bad:
+	if (areq->src != areq->dst) {
+		if (preq_info->dst_nents) {
+			qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+			preq_info->dst_nents, DMA_FROM_DEVICE);
+		}
+	}
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+				preq_info->src_nents,
+				(areq->src == areq->dst) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+
+	struct ahash_request *areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+	bool is_dummy = false;
+
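+	/*
+	 * A NULL sreq selects the internal dummy request (see
+	 * setup_dummy_req() below), which uses the reserved
+	 * DUMMY_REQ_INDEX slot instead of allocating one.
+	 */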
+	if (!sreq) {
+		sreq = &(pce_dev->dummyreq.sreq);
+		req_info = DUMMY_REQ_INDEX;
+		is_dummy = true;
+	} else {
+		req_info = qce_alloc_req_info(pce_dev);
+		if (req_info < 0)
+			return -EBUSY;
+	}
+
+	areq = (struct ahash_request *)sreq->areq;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = count_sg(sreq->src, sreq->size);
+	qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
+							DMA_TO_DEVICE);
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported hash algorithm %d\n", sreq->alg);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+	} else {
+		rc = _ce_setup_hash_direct(pce_dev, sreq);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->areq = areq;
+	preq_info->qce_cb = sreq->qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_HASHING;
+	preq_info->req_len = sreq->size;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+						 &pce_sps_data->in_transfer))
+		goto bad;
+
+	/*
+	 * Always ensure there is input data; zero-length transfers (ZLT)
+	 * do not work for bam-ndp.
+	 */
+	if (!areq->nbytes)
+		_qce_sps_add_data(
+			GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+					SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+	if (pce_dev->no_get_around)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+
+	if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+		goto bad;
+
+	if (is_dummy) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
+				preq_info->src_nents, DMA_TO_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
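+/*
+ * OTA f8 (confidentiality) request for Kasumi and Snow3G. Buffers are
+ * flat (dma_map_single) rather than scatter lists, and in-place
+ * operation is supported when data_in == data_out.
+ */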
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	bool key_stream_mode;
+	dma_addr_t dst;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	key_stream_mode = (req->data_in == NULL);
+
+	/* key stream mode (NULL data_in) is not supported */
+	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	/* F8 cipher input */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+					req->data_in, req->data_len,
+					(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out,
+				req->data_len, DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+	preq_info->ota_size = req->data_len;
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = req->data_len;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
+					&pce_sps_data->in_transfer);
+
+	_qce_set_flag(&pce_sps_data->in_transfer,
+			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)dst, req->data_len,
+					&pce_sps_data->out_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				req->data_len, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->data_len,
+				(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
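+/*
+ * Multi-packet f8: num_pkt packets of data_len bytes each are mapped
+ * as one flat buffer, and cipher_start/cipher_size select the region
+ * ciphered within each packet.
+ */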
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	uint16_t num_pkt = mreq->num_pkt;
+	uint16_t cipher_start = mreq->cipher_start;
+	uint16_t cipher_size = mreq->cipher_size;
+	struct qce_f8_req *req = &mreq->qce_f8_req;
+	uint32_t total;
+	dma_addr_t dst = 0;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	total = num_pkt * req->data_len;
+
+	/* F8 cipher input */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+				req->data_in, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+						DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+
+	preq_info->ota_size = total;
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+			cipher_size, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
+			cipher_start, cipher_size);
+	if (rc)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = total;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
+					&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)dst, total,
+					&pce_sps_data->out_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
+				DMA_FROM_DEVICE);
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
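+/*
+ * OTA f9 (integrity) request for Kasumi and Snow3G. Only the message
+ * is DMA-mapped, as input; the engine's output, including the MAC
+ * registers, is captured through the result dump on the producer
+ * pipe.
+ */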
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+			qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+			req->msize, DMA_TO_DEVICE);
+
+	preq_info->ota_size = req->msize;
+
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
+	else
+		rc = _ce_f9_setup_direct(pce_dev, req);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F9;
+	preq_info->req_len = req->msize;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
+					&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->msize, DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+		struct qce_device *pce_dev)
+{
+	struct resource *resource;
+	int rc = 0;
+
+	pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-shared");
+	pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-key");
+
+	pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-cbc-ecb-ctr-algo");
+	pce_dev->use_sw_aead_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aead-algo");
+	pce_dev->use_sw_aes_xts_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-xts-algo");
+	pce_dev->use_sw_ahash_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-ahash-algo");
+	pce_dev->use_sw_hmac_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-hmac-algo");
+	pce_dev->use_sw_aes_ccm_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-ccm-algo");
+	pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
+	pce_dev->support_only_core_src_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-pipe-pair",
+				&pce_dev->ce_bam_info.pipe_pair_index)) {
+		pr_err("Fail to get bam pipe pair information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-device",
+				&pce_dev->ce_bam_info.ce_device)) {
+		pr_err("Fail to get CE device information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-hw-instance",
+				&pce_dev->ce_bam_info.ce_hw_instance)) {
+		pr_err("Fail to get CE hw instance information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-ee",
+				&pce_dev->ce_bam_info.bam_ee)) {
+		pr_info("BAM Apps EE is not defined, setting to default 1\n");
+		pce_dev->ce_bam_info.bam_ee = 1;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&pce_dev->ce_opp_freq_hz)) {
+		pr_info("CE operating frequency is not defined, setting to default 100MHZ\n");
+		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
+	}
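+	/*
+	 * Each pipe pair maps to two consecutive BAM pipes: the even
+	 * index is used as the destination pipe, the following odd
+	 * index as the source pipe.
+	 */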
+	pce_dev->ce_bam_info.dest_pipe_index	=
+			2 * pce_dev->ce_bam_info.pipe_pair_index;
+	pce_dev->ce_bam_info.src_pipe_index	=
+			pce_dev->ce_bam_info.dest_pipe_index + 1;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-base");
+	if (resource) {
+		pce_dev->phy_iobase = resource->start;
+		pce_dev->iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->iobase) {
+			pr_err("Can not map CRYPTO io memory\n");
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("CRYPTO HW mem unavailable.\n");
+		return -ENODEV;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-bam-base");
+	if (resource) {
+		pce_dev->bam_mem = resource->start;
+		pce_dev->bam_mem_size = resource_size(resource);
+	} else {
+		pr_err("CRYPTO BAM mem unavailable.\n");
+		rc = -ENODEV;
+		goto err_getting_bam_info;
+	}
+
+	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		pce_dev->ce_bam_info.bam_irq = resource->start;
+	} else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		rc = -ENXIO;
+		goto err_dev;
+	}
+	return rc;
+err_dev:
+	if (pce_dev->ce_bam_info.bam_iobase)
+		iounmap(pce_dev->ce_bam_info.bam_iobase);
+
+err_getting_bam_info:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	return rc;
+}
+
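+/*
+ * Clock setup. The full set is a rate-settable core source clock, a
+ * core clock, an interface clock and a bus clock; targets flagged
+ * with qcom,support-core-clk-only provide only the source clock and
+ * the rest are left NULL.
+ */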
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+		rc = clk_set_rate(pce_dev->ce_core_src_clk,
+						pce_dev->ce_opp_freq_hz);
+		if (rc) {
+			pr_err("Unable to set the core src clk @%uMhz.\n",
+					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
+			goto exit_put_core_src_clk;
+		}
+	} else {
+		if (pce_dev->support_only_core_src_clk) {
+			rc = PTR_ERR(pce_dev->ce_core_src_clk);
+			pce_dev->ce_core_src_clk = NULL;
+			pr_err("Unable to get CE core src clk\n");
+			return rc;
+		}
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		pce_dev->ce_core_src_clk = NULL;
+	}
+
+	if (pce_dev->support_only_core_src_clk) {
+		pce_dev->ce_core_clk = NULL;
+		pce_dev->ce_clk = NULL;
+		pce_dev->ce_bus_clk = NULL;
+	} else {
+		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+		if (IS_ERR(pce_dev->ce_core_clk)) {
+			rc = PTR_ERR(pce_dev->ce_core_clk);
+			pr_err("Unable to get CE core clk\n");
+			goto exit_put_core_src_clk;
+		}
+		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+		if (IS_ERR(pce_dev->ce_clk)) {
+			rc = PTR_ERR(pce_dev->ce_clk);
+			pr_err("Unable to get CE interface clk\n");
+			goto exit_put_core_clk;
+		}
+
+		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
+		if (IS_ERR(pce_dev->ce_bus_clk)) {
+			rc = PTR_ERR(pce_dev->ce_bus_clk);
+			pr_err("Unable to get CE BUS interface clk\n");
+			goto exit_put_iface_clk;
+		}
+	}
+	return rc;
+
+exit_put_iface_clk:
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+exit_put_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+exit_put_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+	pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
+static void __qce_deinit_clk(struct qce_device *pce_dev)
+{
+	if (pce_dev->ce_bus_clk)
+		clk_put(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+}
+
+int qce_enable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	int rc = 0;
+
+	if (pce_dev->ce_core_src_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core src clk\n");
+			return rc;
+		}
+	}
+
+	if (pce_dev->support_only_core_src_clk)
+		return rc;
+
+	if (pce_dev->ce_core_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto exit_disable_core_src_clk;
+		}
+	}
+
+	if (pce_dev->ce_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto exit_disable_core_clk;
+		}
+	}
+
+	if (pce_dev->ce_bus_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE BUS clk\n");
+			goto exit_disable_ce_clk;
+		}
+	}
+	return rc;
+
+exit_disable_ce_clk:
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+exit_disable_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+exit_disable_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+	return rc;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc = 0;
+
+	if (pce_dev->ce_bus_clk)
+		clk_disable_unprepare(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
+/*
+ * Dummy request setup: an internal SHA-1 hash over a fixed buffer
+ * (derived from the standard FIPS 180 multi-block test message),
+ * issued when qce_process_sha_req() is called with a NULL sreq and
+ * completed through qce_dummy_complete.
+ */
+static int setup_dummy_req(struct qce_device *pce_dev)
+{
+	char *input =
+	"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
+	int len = DUMMY_REQ_DATA_LEN;
+
+	memcpy(pce_dev->dummyreq_in_buf, input, len);
+	sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
+	sg_mark_end(&pce_dev->dummyreq.sg);
+
+	pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
+	pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
+	pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
+	pce_dev->dummyreq.sreq.auth_data[0] = 0;
+	pce_dev->dummyreq.sreq.auth_data[1] = 0;
+	pce_dev->dummyreq.sreq.auth_data[2] = 0;
+	pce_dev->dummyreq.sreq.auth_data[3] = 0;
+	pce_dev->dummyreq.sreq.first_blk = 1;
+	pce_dev->dummyreq.sreq.last_blk = 1;
+	pce_dev->dummyreq.sreq.size = len;
+	pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
+	pce_dev->dummyreq.sreq.flags = 0;
+	pce_dev->dummyreq.sreq.authkey = NULL;
+
+	pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
+	pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
+
+	return 0;
+}
+
+/* crypto engine open function. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	int i;
+	static int pcedev_no = 1;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		pr_err("Can not allocate memory: %d\n", *rc);
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	mutex_lock(&qce_iomap_mutex);
+	if (pdev->dev.of_node) {
+		*rc = __qce_get_device_tree_data(pdev, pce_dev);
+		if (*rc)
+			goto err_pce_dev;
+	} else {
+		*rc = -EINVAL;
+		pr_err("Device Node not found.\n");
+		goto err_pce_dev;
+	}
+
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
+		pce_dev->ce_request_info[i].in_use = false;
+	pce_dev->ce_request_index = 0;
+
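+	/*
+	 * A single coherent region backs the per-request SPS material
+	 * (command lists, result dump and ignore buffers), addressed
+	 * through GET_PHYS_ADDR()/GET_VIRT_ADDR().
+	 */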
+	pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto err_iobase;
+	}
+
+	pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
+						MAX_QCE_ALLOC_BAM_REQ * 2;
+	pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
+	if (pce_dev->iovec_vmem == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
+	if (pce_dev->dummyreq_in_buf == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	*rc = __qce_init_clk(pce_dev);
+	if (*rc)
+		goto err_mem;
+	*rc = qce_enable_clk(pce_dev);
+	if (*rc)
+		goto err_enable_clk;
+
+	if (_probe_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+
+	qce_init_ce_cfg_val(pce_dev);
+	*rc  = qce_sps_init(pce_dev);
+	if (*rc)
+		goto err;
+	qce_setup_ce_sps_data(pce_dev);
+	qce_disable_clk(pce_dev);
+	setup_dummy_req(pce_dev);
+	atomic_set(&pce_dev->no_of_queued_req, 0);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	init_timer(&(pce_dev->timer));
+	pce_dev->timer.function = qce_multireq_timeout;
+	pce_dev->timer.data = (unsigned long)pce_dev;
+	pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
+	pce_dev->intr_cadence = 0;
+	pce_dev->dev_no = pcedev_no;
+	pcedev_no++;
+	pce_dev->owner = QCE_OWNER_NONE;
+	mutex_unlock(&qce_iomap_mutex);
+	return pce_dev;
+err:
+	qce_disable_clk(pce_dev);
+
+err_enable_clk:
+	__qce_deinit_clk(pce_dev);
+
+err_mem:
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+			pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+err_pce_dev:
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* crypto engine close function. */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	mutex_lock(&qce_iomap_mutex);
+	qce_enable_clk(pce_dev);
+	qce_sps_exit(pce_dev);
+
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+
+	qce_disable_clk(pce_dev);
+	__qce_deinit_clk(pce_dev);
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
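+/*
+ * Typical client lifecycle (a sketch; error handling elided, and the
+ * caller shown is hypothetical):
+ *
+ *	int rc;
+ *	void *handle = qce_open(pdev, &rc);
+ *	struct ce_hw_support support;
+ *
+ *	if (!handle)
+ *		return rc;
+ *	qce_hw_support(handle, &support);
+ *	... submit qce_*_req() calls; results arrive in qce_cb ...
+ *	qce_close(handle);
+ */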
+#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
+				1 << CRYPTO_ENCR_KASUMI_SEL |\
+				1 << CRYPTO_AUTH_SNOW3G_SEL |\
+				1 << CRYPTO_AUTH_KASUMI_SEL)
+
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = true;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
+		ce_support->ota = true;
+	else
+		ce_support->ota = false;
+	ce_support->bam = true;
+	ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
+	ce_support->hw_key = pce_dev->support_hw_key;
+	ce_support->aes_ccm = true;
+	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+	if (pce_dev->ce_bam_info.minor_version)
+		ce_support->aligned_only = false;
+	else
+		ce_support->aligned_only = true;
+
+	ce_support->use_sw_aes_cbc_ecb_ctr_algo =
+				pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
+	ce_support->use_sw_aead_algo =
+				pce_dev->use_sw_aead_algo;
+	ce_support->use_sw_aes_xts_algo =
+				pce_dev->use_sw_aes_xts_algo;
+	ce_support->use_sw_ahash_algo =
+				pce_dev->use_sw_ahash_algo;
+	ce_support->use_sw_hmac_algo =
+				pce_dev->use_sw_hmac_algo;
+	ce_support->use_sw_aes_ccm_algo =
+				pce_dev->use_sw_aes_ccm_algo;
+	ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
+	ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
+	if (pce_dev->no_get_around)
+		ce_support->max_request = MAX_QCE_BAM_REQ;
+	else
+		ce_support->max_request = 1;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+void qce_dump_req(void *handle)
+{
+	int i;
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		pr_info("qce_dump_req %d %d\n", i,
+					pce_dev->ce_request_info[i].in_use);
+		if (pce_dev->ce_request_info[i].in_use == true)
+			_qce_dump_descr_fifos(pce_dev, i);
+	}
+}
+EXPORT_SYMBOL(qce_dump_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
new file mode 100644
index 0000000..0e60bd2
--- /dev/null
+++ b/drivers/crypto/msm/qce50.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <linux/msm-sps.h>
+
+/* MAX Data xfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE   0x40
+#define QCEBAM_BURST_SIZE	MAX_CE_BAM_BURST_SIZE
+
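+/*
+ * Translate between the kernel virtual and physical views of the
+ * coherent region allocated in qce_open(); only meaningful for
+ * addresses inside that region.
+ */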
+#define GET_VIRT_ADDR(x)  \
+		((uintptr_t)pce_dev->coh_vmem +			\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x)  \
+		(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem +	\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED   26
+#define CRYPTO_RESULT_DUMP_SIZE   \
+	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+	QCEBAM_BURST_SIZE)
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+#define SPS_MAX_PKT_SIZE  (32 * 1024  - 64)
+
+/* default bam ipc log level */
+#define QCE_BAM_DEFAULT_IPC_LOGLVL 2
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+	QCE_PIPE_STATE_IDLE = 0,
+	QCE_PIPE_STATE_IN_PROG = 1,
+	QCE_PIPE_STATE_COMP = 2,
+	QCE_PIPE_STATE_LAST
+};
+
+enum qce_xfer_type_enum {
+	QCE_XFER_HASHING,
+	QCE_XFER_CIPHERING,
+	QCE_XFER_AEAD,
+	QCE_XFER_F8,
+	QCE_XFER_F9,
+	QCE_XFER_TYPE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+	struct sps_pipe			*pipe;
+	struct sps_connect		connect;
+	struct sps_register_event	event;
+};
+
+/* CE Result DUMP format*/
+struct ce_result_dump_format {
+	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+	uint32_t status;
+	uint32_t status2;
+};
+
+struct qce_cmdlist_info {
+	unsigned long cmdlist;
+	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *encr_seg_cfg;
+	struct sps_command_element *encr_seg_size;
+	struct sps_command_element *encr_seg_start;
+	struct sps_command_element *encr_key;
+	struct sps_command_element *encr_xts_key;
+	struct sps_command_element *encr_cntr_iv;
+	struct sps_command_element *encr_ccm_cntr_iv;
+	struct sps_command_element *encr_mask;
+	struct sps_command_element *encr_xts_du_size;
+
+	struct sps_command_element *auth_seg_cfg;
+	struct sps_command_element *auth_seg_size;
+	struct sps_command_element *auth_seg_start;
+	struct sps_command_element *auth_key;
+	struct sps_command_element *auth_iv;
+	struct sps_command_element *auth_nonce_info;
+	struct sps_command_element *auth_bytecount;
+	struct sps_command_element *seg_size;
+	struct sps_command_element *go_proc;
+	ptrdiff_t size;
+};
+
+struct qce_cmdlistptr_ops {
+	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_128_ecb;
+	struct qce_cmdlist_info cipher_aes_256_ecb;
+	struct qce_cmdlist_info cipher_aes_128_xts;
+	struct qce_cmdlist_info cipher_aes_256_xts;
+	struct qce_cmdlist_info cipher_des_cbc;
+	struct qce_cmdlist_info cipher_des_ecb;
+	struct qce_cmdlist_info cipher_3des_cbc;
+	struct qce_cmdlist_info cipher_3des_ecb;
+	struct qce_cmdlist_info auth_sha1;
+	struct qce_cmdlist_info auth_sha256;
+	struct qce_cmdlist_info auth_sha1_hmac;
+	struct qce_cmdlist_info auth_sha256_hmac;
+	struct qce_cmdlist_info auth_aes_128_cmac;
+	struct qce_cmdlist_info auth_aes_256_cmac;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
+	struct qce_cmdlist_info aead_aes_128_ccm;
+	struct qce_cmdlist_info aead_aes_256_ccm;
+	struct qce_cmdlist_info cipher_null;
+	struct qce_cmdlist_info f8_kasumi;
+	struct qce_cmdlist_info f8_snow3g;
+	struct qce_cmdlist_info f9_kasumi;
+	struct qce_cmdlist_info f9_snow3g;
+	struct qce_cmdlist_info unlock_all_pipes;
+};
+
+struct qce_ce_cfg_reg_setting {
+	uint32_t crypto_cfg_be;
+	uint32_t crypto_cfg_le;
+
+	uint32_t encr_cfg_aes_cbc_128;
+	uint32_t encr_cfg_aes_cbc_256;
+
+	uint32_t encr_cfg_aes_ecb_128;
+	uint32_t encr_cfg_aes_ecb_256;
+
+	uint32_t encr_cfg_aes_xts_128;
+	uint32_t encr_cfg_aes_xts_256;
+
+	uint32_t encr_cfg_aes_ctr_128;
+	uint32_t encr_cfg_aes_ctr_256;
+
+	uint32_t encr_cfg_aes_ccm_128;
+	uint32_t encr_cfg_aes_ccm_256;
+
+	uint32_t encr_cfg_des_cbc;
+	uint32_t encr_cfg_des_ecb;
+
+	uint32_t encr_cfg_3des_cbc;
+	uint32_t encr_cfg_3des_ecb;
+	uint32_t encr_cfg_kasumi;
+	uint32_t encr_cfg_snow3g;
+
+	uint32_t auth_cfg_cmac_128;
+	uint32_t auth_cfg_cmac_256;
+
+	uint32_t auth_cfg_sha1;
+	uint32_t auth_cfg_sha256;
+
+	uint32_t auth_cfg_hmac_sha1;
+	uint32_t auth_cfg_hmac_sha256;
+
+	uint32_t auth_cfg_aes_ccm_128;
+	uint32_t auth_cfg_aes_ccm_256;
+	uint32_t auth_cfg_aead_sha1_hmac;
+	uint32_t auth_cfg_aead_sha256_hmac;
+	uint32_t auth_cfg_kasumi;
+	uint32_t auth_cfg_snow3g;
+};
+
+struct ce_bam_info {
+	uint32_t			bam_irq;
+	uint32_t			bam_mem;
+	void __iomem			*bam_iobase;
+	uint32_t			ce_device;
+	uint32_t			ce_hw_instance;
+	uint32_t			bam_ee;
+	unsigned int			pipe_pair_index;
+	unsigned int			src_pipe_index;
+	unsigned int			dest_pipe_index;
+	unsigned long			bam_handle;
+	int				ce_burst_size;
+	uint32_t			minor_version;
+	struct qce_sps_ep_conn_data	producer;
+	struct qce_sps_ep_conn_data	consumer;
+};
+
+/* SPS data structure with buffers, command lists & command pointer lists */
+struct ce_sps_data {
+	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
+	int consumer_status;		/* consumer pipe status */
+	int producer_status;		/* producer pipe status */
+	struct sps_transfer in_transfer;
+	struct sps_transfer out_transfer;
+	struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump; /* result dump virtual address */
+	uint32_t result_dump_null;
+	uint32_t result_dump_phy; /* result dump physical address (32 bits) */
+	uint32_t result_dump_null_phy;
+
+	uint32_t ignore_buffer; /* ignore buffer virtual address */
+	struct ce_result_dump_format *result; /* pointer to result dump */
+	struct ce_result_dump_format *result_null;
+};
+
+struct ce_request_info {
+	bool in_use;
+	bool in_prog;
+	enum qce_xfer_type_enum	xfer_type;
+	struct ce_sps_data ce_sps;
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+	void *user;
+	void *areq;
+	int assoc_nents;
+	struct scatterlist *asg;        /* Formatted associated data sg  */
+	int src_nents;
+	int dst_nents;
+	dma_addr_t phy_iv_in;
+	unsigned char dec_iv[16];
+	int dir;
+	enum qce_cipher_mode_enum mode;
+	dma_addr_t phy_ota_src;
+	dma_addr_t phy_ota_dst;
+	unsigned int ota_size;
+	unsigned int req_len;
+};
+
+struct qce_driver_stats {
+	int no_of_timeouts;
+	int no_of_dummy_reqs;
+	int current_mode;
+	int outstanding_reqs;
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */
diff --git a/drivers/crypto/msm/qce_ota.h b/drivers/crypto/msm/qce_ota.h
new file mode 100644
index 0000000..2f985fa
--- /dev/null
+++ b/drivers/crypto/msm/qce_ota.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* QTI Crypto Engine driver OTA API */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include <linux/qcota.h>
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
new file mode 100644
index 0000000..0860e59
--- /dev/null
+++ b/drivers/crypto/msm/qcedev.c
@@ -0,0 +1,2054 @@
+/*
+ * QTI CE device driver.
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/qcedev.h>
+
+#include <crypto/hash.h>
+#include "qcedevi.h"
+#include "qce.h"
+
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+static uint8_t _std_init_vector_sha1_uint8[] = {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+static DEFINE_MUTEX(send_cmd_lock);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
+
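+/*
+ * Reference-counted bandwidth voting: the first requester enables the
+ * CE clocks and votes for high bus bandwidth; the vote and clocks are
+ * dropped only when the last requester releases. Failures roll back
+ * the step that had already succeeded.
+ */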
+static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
+							bool high_bw_req)
+{
+	int ret = 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (high_bw_req) {
+		if (podev->high_bw_req_count == 0) {
+			ret = qce_enable_clk(podev->qce);
+			if (ret) {
+				pr_err("%s Unable enable clk\n", __func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+			ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 1);
+			if (ret) {
+				pr_err("%s Unable to set to high bandwidth\n",
+							__func__);
+				ret = qce_disable_clk(podev->qce);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+		}
+		podev->high_bw_req_count++;
+	} else {
+		if (podev->high_bw_req_count == 1) {
+			ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 0);
+			if (ret) {
+				pr_err("%s Unable to set to low bandwidth\n",
+							__func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+			ret = qce_disable_clk(podev->qce);
+			if (ret) {
+				pr_err("%s Unable disable clk\n", __func__);
+				ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 1);
+				if (ret)
+					pr_err("%s Unable to set to high bandwidth\n",
+							__func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+		}
+		podev->high_bw_req_count--;
+	}
+	mutex_unlock(&qcedev_sent_bw_req);
+}
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev);
+static int start_sha_req(struct qcedev_control *podev);
+static inline long qcedev_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+#include "compat_qcedev.c"
+#else
+#define compat_qcedev_ioctl	NULL
+#endif
+
+static const struct file_operations qcedev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcedev_ioctl,
+	.compat_ioctl = compat_qcedev_ioctl,
+	.open = qcedev_open,
+	.release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qce",
+			.fops = &qcedev_fops,
+		},
+		.magic = QCEDEV_MAGIC,
+	},
+};
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+	u32 qcedev_dec_success;
+	u32 qcedev_dec_fail;
+	u32 qcedev_enc_success;
+	u32 qcedev_enc_fail;
+	u32 qcedev_sha_success;
+	u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev;
+
+static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
+{
+	int i;
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		if (qce_dev[i].miscdevice.minor == n)
+			return &qce_dev[i];
+	}
+	return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+
+	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+					MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
+	if (handle == NULL)
+		return -ENOMEM;
+
+	handle->cntl = podev;
+	file->private_data = handle;
+	if (podev->platform_support.bus_scale_table != NULL)
+		qcedev_ce_high_bw_req(podev, true);
+	return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+	struct qcedev_handle *handle;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+					__func__, podev);
+	}
+	kzfree(handle);
+	file->private_data = NULL;
+	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
+		qcedev_ce_high_bw_req(podev, false);
+	return 0;
+}
+
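+/*
+ * Completion tasklet: completes the request that just finished and
+ * starts the next one queued on ready_commands. If starting the new
+ * request fails synchronously, it is completed immediately and the
+ * loop retries with the next queued request.
+ */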
+static void req_done(unsigned long data)
+{
+	struct qcedev_control *podev = (struct qcedev_control *)data;
+	struct qcedev_async_req *areq;
+	unsigned long flags = 0;
+	struct qcedev_async_req *new_req = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+again:
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct qcedev_async_req, list);
+		list_del(&new_req->list);
+		podev->active_command = new_req;
+		new_req->err = 0;
+		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (areq)
+		complete(&areq->complete);
+
+	if (new_req && ret) {
+		complete(&new_req->complete);
+		spin_lock_irqsave(&podev->lock, flags);
+		podev->active_command = NULL;
+		areq = NULL;
+		ret = 0;
+		new_req = NULL;
+		goto again;
+	}
+}
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret)
+{
+	struct qcedev_sha_req *areq;
+	struct qcedev_control *pdev;
+	struct qcedev_handle *handle;
+
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	areq = (struct qcedev_sha_req *) cookie;
+	handle = (struct qcedev_handle *) areq->cookie;
+	pdev = handle->cntl;
+
+	if (digest)
+		memcpy(&handle->sha_ctxt.digest[0], digest, 32);
+
+	if (authdata) {
+		handle->sha_ctxt.auth_data[0] = auth32[0];
+		handle->sha_ctxt.auth_data[1] = auth32[1];
+		handle->sha_ctxt.auth_data[2] = auth32[2];
+		handle->sha_ctxt.auth_data[3] = auth32[3];
+	}
+
+	tasklet_schedule(&pdev->done_tasklet);
+}
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	handle = (struct qcedev_handle *) areq->cookie;
+	podev = handle->cntl;
+	qcedev_areq = podev->active_command;
+
+	if (iv)
+		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+					qcedev_areq->cipher_op_req.ivlen);
+	tasklet_schedule(&podev->done_tasklet);
+}
+
+static int start_cipher_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto unsupported;
+	}
+	creq.pmem = NULL;
+	switch (qcedev_areq->cipher_op_req.alg) {
+	case QCEDEV_ALG_DES:
+		creq.alg = CIPHER_ALG_DES;
+		break;
+	case QCEDEV_ALG_3DES:
+		creq.alg = CIPHER_ALG_3DES;
+		break;
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (qcedev_areq->cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+	case QCEDEV_DES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_ECB:
+	case QCEDEV_DES_MODE_ECB:
+		creq.mode = QCE_MODE_ECB;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	case QCEDEV_AES_MODE_XTS:
+		creq.mode = QCE_MODE_XTS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((creq.alg == CIPHER_ALG_AES) &&
+		(creq.mode == QCE_MODE_CTR)) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
+			creq.dir = QCE_ENCRYPT;
+		else
+			creq.dir = QCE_DECRYPT;
+	}
+
+	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+
+	creq.enckey =  &qcedev_areq->cipher_op_req.enckey[0];
+	creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+	if (qcedev_areq->cipher_op_req.encklen == 0) {
+		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+			|| (qcedev_areq->cipher_op_req.op ==
+				QCEDEV_OPER_DEC_NO_KEY))
+			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+		else {
+			int i;
+
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+					break;
+			}
+
+			if ((podev->platform_support.hw_key_support == 1) &&
+						(i == QCEDEV_MAX_KEY_SIZE))
+				creq.op = QCE_REQ_ABLK_CIPHER;
+			else {
+				ret = -EINVAL;
+				goto unsupported;
+			}
+		}
+	} else {
+		creq.op = QCE_REQ_ABLK_CIPHER;
+	}
+
+	creq.qce_cb = qcedev_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+	creq.flags = 0;
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+unsupported:
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
+static int start_sha_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_sha_req sreq;
+	int ret = 0;
+	struct qcedev_handle *handle;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	handle = qcedev_areq->handle;
+
+	switch (qcedev_areq->sha_op_req.alg) {
+	case QCEDEV_ALG_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		break;
+	case QCEDEV_ALG_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		break;
+	case QCEDEV_ALG_SHA1_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA1_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+
+		} else {
+			sreq.alg = QCE_HASH_SHA1;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_SHA256_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA256_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+		} else {
+			sreq.alg = QCE_HASH_SHA256;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_AES_CMAC:
+		sreq.alg = QCE_HASH_AES_CMAC;
+		sreq.authkey = &handle->sha_ctxt.authkey[0];
+		sreq.authklen = qcedev_areq->sha_op_req.authklen;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n",
+			qcedev_areq->sha_op_req.alg);
+		return -EINVAL;
+	}
+
+	qcedev_areq->sha_req.cookie = handle;
+
+	sreq.qce_cb = qcedev_sha_req_cb;
+	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
+		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
+		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
+		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
+		sreq.digest = &handle->sha_ctxt.digest[0];
+		sreq.first_blk = handle->sha_ctxt.first_blk;
+		sreq.last_blk = handle->sha_ctxt.last_blk;
+	}
+	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+	sreq.src = qcedev_areq->sha_req.sreq.src;
+	sreq.areq = (void *)&qcedev_areq->sha_req;
+	sreq.flags = 0;
+
+	ret = qce_process_sha_req(podev->qce, &sreq);
+
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
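+/*
+ * Submit a request and block until it completes.  Only one command is
+ * active on the engine at a time; if one is already running, the new
+ * request is parked on podev->ready_commands and started later from the
+ * done tasklet.  The driver statistics are updated once the request has
+ * finished.
+ */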
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle)
+{
+	struct qcedev_control *podev;
+	unsigned long flags = 0;
+	int ret = 0;
+	struct qcedev_stat *pstat;
+
+	qcedev_areq->err = 0;
+	podev = handle->cntl;
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	if (podev->active_command == NULL) {
+		podev->active_command = qcedev_areq;
+		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev);
+	} else {
+		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
+	}
+
+	if (ret != 0)
+		podev->active_command = NULL;
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (ret == 0)
+		wait_for_completion(&qcedev_areq->complete);
+
+	if (ret)
+		qcedev_areq->err = -EIO;
+
+	pstat = &_qcedev_stat;
+	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+		switch (qcedev_areq->cipher_op_req.op) {
+		case QCEDEV_OPER_DEC:
+			if (qcedev_areq->err)
+				pstat->qcedev_dec_fail++;
+			else
+				pstat->qcedev_dec_success++;
+			break;
+		case QCEDEV_OPER_ENC:
+			if (qcedev_areq->err)
+				pstat->qcedev_enc_fail++;
+			else
+				pstat->qcedev_enc_success++;
+			break;
+		default:
+			break;
+		}
+	} else {
+		if (qcedev_areq->err)
+			pstat->qcedev_sha_fail++;
+		else
+			pstat->qcedev_sha_success++;
+	}
+
+	return qcedev_areq->err;
+}
+
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
+
+	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+	sha_ctxt->first_blk = 1;
+
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+		memcpy(&sha_ctxt->digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+			memcpy(&sha_ctxt->digest[0],
+					&_std_init_vector_sha256_uint8[0],
+					SHA256_DIGEST_SIZE);
+			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+		}
+	}
+	sha_ctxt->init_done = true;
+	return 0;
+}
+
+
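+/*
+ * Hash one chunk of user data.  The engine consumes data in
+ * CE_SHA_BLOCK_SIZE multiples, so the tail of the chunk (a whole block
+ * when the length is an exact multiple, so that the final request always
+ * has data) is kept back in sha_ctxt.trailing_buf and prepended to the
+ * next update or to the final block.
+ */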
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
+	uint32_t sha_block_size;
+
+	total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	if (total <= sha_block_size) {
+		uint32_t len =  qcedev_areq->sha_op_req.data_len;
+
+		i = 0;
+
+		k_src = &handle->sha_ctxt.trailing_buf[t_buf];
+
+		/* Copy data from user src(s) */
+		while (len > 0) {
+			user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+			if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len))
+				return -EFAULT;
+
+			len -= qcedev_areq->sha_op_req.data[i].len;
+			k_src += qcedev_areq->sha_op_req.data[i].len;
+			i++;
+		}
+		handle->sha_ctxt.trailing_buf_len = total;
+
+		return 0;
+	}
+
+
+	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	k_src = k_align_src;
+
+	/* check for trailing buffer from previous updates and append it */
+	if (t_buf > 0) {
+		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+								t_buf);
+		k_src += t_buf;
+	}
+
+	/* Copy data from user src(s) */
+	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+	if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[0].len)) {
+		kzfree(k_buf_src);
+		return -EFAULT;
+	}
+	k_src += qcedev_areq->sha_op_req.data[0].len;
+	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src,
+					(void __user *)user_src,
+					qcedev_areq->sha_op_req.data[i].len)) {
+			kzfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+	trailing_buf_len =  CE_SHA_BLOCK_SIZE - sha_pad_len;
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
+						total-trailing_buf_len);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+	/*  update sha_ctxt trailing buf content to new trailing buf */
+	if (trailing_buf_len > 0) {
+		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+		memcpy(&handle->sha_ctxt.trailing_buf[0],
+			(k_src - trailing_buf_len),
+			trailing_buf_len);
+	}
+	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+
+	kzfree(k_buf_src);
+	return err;
+}
+
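+/*
+ * Split a hash update larger than QCE_MAX_OPER_DATA into engine-sized
+ * transfers.  The caller's qcedev_sha_op_req is rewritten in place for
+ * each partial transfer and restored from a saved copy before returning.
+ */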
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+
+	if (handle->sha_ctxt.init_done == false) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct	qcedev_sha_op_req *saved_req;
+		struct	qcedev_sha_op_req req;
+		struct	qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+
+		/* save the original req structure */
+		saved_req =
+			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
+		if (saved_req == NULL) {
+			pr_err("%s:Can't Allocate mem:saved_req 0x%lx\n",
+						__func__, (uintptr_t)saved_req);
+			return -ENOMEM;
+		}
+		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
+		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
+
+		i = 0;
+		/* Address 32 KB at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+				sreq->data[0].len = QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					sreq->data[0].vaddr =
+							sreq->data[i].vaddr;
+				}
+
+				sreq->data_len = QCE_MAX_OPER_DATA;
+				sreq->entries = 1;
+
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+
+				sreq->data[i].len = req.data[i].len -
+							QCE_MAX_OPER_DATA;
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							QCE_MAX_OPER_DATA;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + sreq->data[j].len) >=
+							QCE_MAX_OPER_DATA) {
+						sreq->data[j].len =
+						(QCE_MAX_OPER_DATA - total);
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += sreq->data[j].len;
+				}
+
+				sreq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						sreq->data[k].len =
+							sreq->data[i+k].len;
+						sreq->data[k].vaddr =
+							sreq->data[i+k].vaddr;
+					}
+				sreq->entries = num_entries;
+
+				i = j;
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+				num_entries = 0;
+
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							sreq->data[i].len;
+				sreq->data[i].len = req.data[i].len -
+							sreq->data[i].len;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+
+				if (sreq->data[i].len == 0)
+					i++;
+			}
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+		/* Restore the original req structure */
+		for (i = 0; i < saved_req->entries; i++) {
+			sreq->data[i].len = saved_req->data[i].len;
+			sreq->data[i].vaddr = saved_req->data[i].vaddr;
+		}
+		sreq->entries = saved_req->entries;
+		sreq->data_len = saved_req->data_len;
+		kzfree(saved_req);
+	} else
+		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
+
+	return err;
+}
+
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint32_t total;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	if (handle->sha_ctxt.init_done == false) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	handle->sha_ctxt.last_blk = 1;
+
+	total = handle->sha_ctxt.trailing_buf_len;
+
+	if (total) {
+		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+					GFP_KERNEL);
+		if (k_buf_src == NULL)
+			return -ENOMEM;
+
+		k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
+	}
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.first_blk = 0;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+	handle->sha_ctxt.trailing_buf_len = 0;
+	handle->sha_ctxt.init_done = false;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+
+	kzfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+
+	total = qcedev_areq->sha_op_req.data_len;
+
+	if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+		return -EFAULT;
+
+
+	k_buf_src = kmalloc(total, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_src = k_buf_src;
+
+	/* Copy data from user src(s) */
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src, (void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len)) {
+			kzfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+	err = submit_req(qcedev_areq, handle);
+
+	kzfree(k_buf_src);
+	return err;
+}
+
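+/*
+ * Install the HMAC authentication key.  Mirroring the RFC 2104 rule for
+ * over-long keys, a key that exceeds QCEDEV_MAX_KEY_SIZE is first hashed
+ * with the underlying digest and the resulting digest is used as the
+ * effective key.
+ */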
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+
+	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+		qcedev_sha_init(areq, handle);
+		if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+	} else {
+		struct qcedev_async_req authkey_areq;
+		uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+
+		init_completion(&authkey_areq.complete);
+
+		authkey_areq.sha_op_req.entries = 1;
+		authkey_areq.sha_op_req.data[0].vaddr =
+						areq->sha_op_req.authkey;
+		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.diglen = 0;
+		authkey_areq.handle = handle;
+
+		memset(&authkey_areq.sha_op_req.digest[0], 0,
+						QCEDEV_MAX_SHA_DIGEST);
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		qcedev_sha_init(&authkey_areq, handle);
+		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
+		if (!err)
+			err = qcedev_sha_final(&authkey_areq, handle);
+		else
+			return err;
+		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		qcedev_sha_init(areq, handle);
+
+		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
+				handle->sha_ctxt.diglen);
+	}
+	return err;
+}
+
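+/*
+ * Start the outer hash of the software HMAC fallback: hash the
+ * opad-XORed key block that qcedev_hmac_update_iokey() left in
+ * trailing_buf, then queue the inner digest as the trailing data for the
+ * concluding qcedev_sha_final() call.
+ */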
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint8_t *k_src = NULL;
+	uint32_t sha_block_size = 0;
+	uint32_t sha_digest_size = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		sha_digest_size = SHA1_DIGEST_SIZE;
+		sha_block_size = SHA1_BLOCK_SIZE;
+	} else {
+		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+			sha_digest_size = SHA256_DIGEST_SIZE;
+			sha_block_size = SHA256_BLOCK_SIZE;
+		}
+	}
+	k_src = kmalloc(sha_block_size, GFP_KERNEL);
+	if (k_src == NULL)
+		return -ENOMEM;
+
+	/* check for trailing buffer from previous updates and append it */
+	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+			handle->sha_ctxt.trailing_buf_len);
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
+					sha_digest_size);
+	handle->sha_ctxt.trailing_buf_len = sha_digest_size;
+
+	handle->sha_ctxt.first_blk = 1;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
+	}
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
+	}
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+
+	kzfree(k_src);
+	return err;
+}
+
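+/*
+ * Prepare the RFC 2104 inner (ipad, 0x36) or outer (opad, 0x5c) padded
+ * key block for the software HMAC fallback by XORing the authentication
+ * key with the pad constant, leaving the block in trailing_buf so it is
+ * hashed ahead of the message data.
+ */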
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle, bool ikey)
+{
+	int i;
+	uint32_t constant;
+	uint32_t sha_block_size;
+
+	if (ikey)
+		constant = 0x36;
+	else
+		constant = 0x5c;
+
+	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	for (i = 0; i < sha_block_size; i++)
+		handle->sha_ctxt.trailing_buf[i] =
+				(handle->sha_ctxt.authkey[i] ^ constant);
+
+	handle->sha_ctxt.trailing_buf_len = sha_block_size;
+	return 0;
+}
+
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
+	if (err)
+		return err;
+	if (!podev->ce_support.sha_hmac)
+		qcedev_hmac_update_iokey(areq, handle, true);
+	return 0;
+}
+
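+/*
+ * Finish an HMAC: complete the inner hash and, when the engine lacks
+ * native HMAC support, run the outer hash over the opad key block and
+ * the inner digest as described above.
+ */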
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_sha_final(areq, handle);
+	if (podev->ce_support.sha_hmac)
+		return err;
+
+	qcedev_hmac_update_iokey(areq, handle, false);
+	err = qcedev_hmac_get_ohash(areq, handle);
+	if (err)
+		return err;
+	err = qcedev_sha_final(areq, handle);
+
+	return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_init(areq, handle);
+	else
+		return qcedev_hmac_init(areq, handle, sg_src);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	return qcedev_sha_update(qcedev_areq, handle, sg_src);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_final(areq, handle);
+	else
+		return qcedev_hmac_final(areq, handle);
+}
+
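+/*
+ * Run one cipher transfer: gather the user source buffers into a single
+ * cache-line-aligned kernel bounce buffer, do the crypto operation in
+ * place on that buffer, and scatter the result back to the user
+ * destination buffers, tracking the destination index across calls
+ * through *di.
+ */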
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+				int *di, struct qcedev_handle *handle,
+				uint8_t *k_align_src)
+{
+	int err = 0;
+	int i = 0;
+	int dst_i = *di;
+	struct scatterlist sg_src;
+	uint32_t byteoffset = 0;
+	uint8_t *user_src = NULL;
+	uint8_t *k_align_dst = k_align_src;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+
+	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+	if (user_src && copy_from_user((k_align_src + byteoffset),
+				(void __user *)user_src,
+				areq->cipher_op_req.vbuf.src[0].len))
+		return -EFAULT;
+
+	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
+
+	for (i = 1; i < areq->cipher_op_req.entries; i++) {
+		user_src =
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+		if (user_src && copy_from_user(k_align_src,
+					(void __user *)user_src,
+					areq->cipher_op_req.vbuf.src[i].len)) {
+			return -EFAULT;
+		}
+		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+	}
+
+	/* restore src beginning */
+	k_align_src = k_align_dst;
+	areq->cipher_op_req.data_len += byteoffset;
+
+	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+	/* In place encryption/decryption */
+	sg_set_buf(areq->cipher_req.creq.src,
+					k_align_dst,
+					areq->cipher_op_req.data_len);
+	sg_mark_end(areq->cipher_req.creq.src);
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+	areq->cipher_op_req.entries = 1;
+
+	err = submit_req(areq, handle);
+
+	/* copy data to destination buffer*/
+	creq->data_len -= byteoffset;
+
+	while (creq->data_len > 0) {
+		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+			if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->vbuf.dst[dst_i].len))
+				return -EFAULT;
+
+			k_align_dst += creq->vbuf.dst[dst_i].len +
+						byteoffset;
+			creq->data_len -= creq->vbuf.dst[dst_i].len;
+			dst_i++;
+		} else {
+			if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->data_len))
+				return -EFAULT;
+
+			k_align_dst += creq->data_len;
+			creq->vbuf.dst[dst_i].len -= creq->data_len;
+			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+			creq->data_len = 0;
+		}
+	}
+	*di = dst_i;
+
+	return err;
+}
+
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+						struct qcedev_handle *handle)
+{
+	int err = 0;
+	int di = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	uint32_t byteoffset = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	uint32_t len;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+	uint32_t max_data_xfer;
+	struct qcedev_cipher_op_req *saved_req;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	total = 0;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		kzfree(k_buf_src);
+		return -ENOMEM;
+
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (areq->cipher_op_req.data_len > max_data_xfer) {
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address 32 KB at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->vbuf.src[i].len > max_data_xfer) {
+				creq->vbuf.src[0].len =	max_data_xfer;
+				if (i > 0) {
+					creq->vbuf.src[0].vaddr =
+						creq->vbuf.src[i].vaddr;
+				}
+
+				creq->data_len = max_data_xfer;
+				creq->entries = 1;
+
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					kzfree(k_buf_src);
+					kzfree(saved_req);
+					return err;
+				}
+
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							max_data_xfer;
+				creq->vbuf.src[i].vaddr =
+						req.vbuf.src[i].vaddr +
+						max_data_xfer;
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+			} else {
+				total = areq->cipher_op_req.byteoffset;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->vbuf.src[j].len)
+							>= max_data_xfer) {
+						creq->vbuf.src[j].len =
+						max_data_xfer - total;
+						total = max_data_xfer;
+						break;
+					}
+					total += creq->vbuf.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->vbuf.src[k].len =
+						creq->vbuf.src[i+k].len;
+						creq->vbuf.src[k].vaddr =
+						creq->vbuf.src[i+k].vaddr;
+					}
+				creq->entries =  num_entries;
+
+				i = j;
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					kzfree(k_buf_src);
+					kzfree(saved_req);
+					return err;
+				}
+
+				num_entries = 0;
+				areq->cipher_op_req.byteoffset = 0;
+
+				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+					+ creq->vbuf.src[i].len;
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							creq->vbuf.src[i].len;
+
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+				if (creq->vbuf.src[i].len == 0)
+					i++;
+			}
+
+			areq->cipher_op_req.byteoffset = 0;
+			max_data_xfer = QCE_MAX_OPER_DATA;
+			byteoffset = 0;
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+	} else
+		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
+								k_align_src);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+	}
+	for (len = 0, i = 0; len < saved_req->data_len; i++) {
+		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+		len += saved_req->vbuf.dst[i].len;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	creq->byteoffset = saved_req->byteoffset;
+
+	kzfree(saved_req);
+	kzfree(k_buf_src);
+	return err;
+
+}
+
+static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	/* if intending to use HW key make sure key fields are set
+	 * correctly and HW key is indeed supported in target
+	 */
+	if (req->encklen == 0) {
+		int i;
+
+		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+			if (req->enckey[i]) {
+				pr_err("%s: Invalid key: non-zero key input\n",
+								__func__);
+				goto error;
+			}
+		}
+		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+			(req->op != QCEDEV_OPER_DEC_NO_KEY))
+			if (!podev->platform_support.hw_key_support) {
+				pr_err("%s: Invalid op %d\n", __func__,
+						(uint32_t)req->op);
+				goto error;
+			}
+	} else {
+		if (req->encklen == QCEDEV_AES_KEY_192) {
+			if (!podev->ce_support.aes_key_192) {
+				pr_err("%s: AES-192 not supported\n", __func__);
+				goto error;
+			}
+		} else {
+			/* if not using HW key make sure key
+			 * length is valid
+			 */
+			if (req->mode == QCEDEV_AES_MODE_XTS) {
+				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
+				(req->encklen != QCEDEV_AES_KEY_256*2)) {
+					pr_err("%s: unsupported key size: %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			} else {
+				if ((req->encklen != QCEDEV_AES_KEY_128) &&
+					(req->encklen != QCEDEV_AES_KEY_256)) {
+					pr_err("%s: unsupported key size %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			}
+		}
+	}
+	return 0;
+error:
+	return -EINVAL;
+}
+
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if (req->use_pmem) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto error;
+	}
+	if ((req->entries == 0) || (req->data_len == 0) ||
+			(req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid cipher length/entries\n", __func__);
+		goto error;
+	}
+	if ((req->alg >= QCEDEV_ALG_LAST) ||
+		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
+		pr_err("%s: Invalid algorithm %d\n", __func__,
+						(uint32_t)req->alg);
+		goto error;
+	}
+	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+				(!podev->ce_support.aes_xts)) {
+		pr_err("%s: XTS algorithm is not supported\n", __func__);
+		goto error;
+	}
+	if (req->alg == QCEDEV_ALG_AES) {
+		if (qcedev_check_cipher_key(req, podev))
+			goto error;
+
+	}
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR) {
+			pr_err("%s: Operation on byte offset not supported\n",
+								 __func__);
+			goto error;
+		}
+		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+			pr_err("%s: Invalid byte offset\n", __func__);
+			goto error;
+		}
+		total = req->byteoffset;
+		for (i = 0; i < req->entries; i++) {
+			if (total > U32_MAX - req->vbuf.src[i].len) {
+				pr_err("%s:Integer overflow on total src len\n",
+					__func__);
+				goto error;
+			}
+			total += req->vbuf.src[i].len;
+		}
+	}
+
+	if (req->data_len < req->byteoffset) {
+		pr_err("%s: req data length %u is less than byteoffset %u\n",
+				__func__, req->data_len, req->byteoffset);
+		goto error;
+	}
+
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure Key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+		goto error;
+	}
+
+	/* Ensure zero ivlen for ECB mode */
+	if (req->ivlen > 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+				(req->mode == QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a zero length IV\n", __func__);
+			goto error;
+		}
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+				(req->mode != QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a non-zero ength IV\n", __func__);
+			goto error;
+		}
+	}
+	/* Check for sum of all dst length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.dst[i].len);
+			goto error;
+		}
+		if (req->vbuf.dst[i].len >= U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req dst vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.dst[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+			__func__, i, total, req->data_len);
+		goto error;
+	}
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.src[i].len);
+			goto error;
+		}
+		if (req->vbuf.src[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req src vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.src[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto error;
+	}
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+				(!podev->ce_support.cmac)) {
+		pr_err("%s: CMAC not supported\n", __func__);
+		goto sha_error;
+	}
+	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid num entries (%d)\n",
+						__func__, req->entries);
+		goto sha_error;
+	}
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
+		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
+		goto sha_error;
+	}
+	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
+		if (req->authkey == NULL) {
+			pr_err("%s: Invalid authkey pointer\n", __func__);
+			goto sha_error;
+		}
+		if (req->authklen <= 0) {
+			pr_err("%s: Invalid authkey length (%d)\n",
+						__func__, req->authklen);
+			goto sha_error;
+		}
+	}
+
+	if (req->alg == QCEDEV_ALG_AES_CMAC) {
+		if ((req->authklen != QCEDEV_AES_KEY_128) &&
+					(req->authklen != QCEDEV_AES_KEY_256)) {
+			pr_err("%s: unsupported key length\n", __func__);
+			goto sha_error;
+		}
+	}
+
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (req->data[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req buf length\n",
+				__func__);
+			goto sha_error;
+		}
+		total += req->data[i].len;
+	}
+
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto sha_error;
+	}
+	return 0;
+sha_error:
+	return -EINVAL;
+}
+
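+/*
+ * ioctl entry point: each operation request is copied in from user
+ * space, validated against the engine capabilities, executed
+ * synchronously, and the results copied back before returning.
+ */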
+static inline long qcedev_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req qcedev_areq;
+	struct qcedev_stat *pstat;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	qcedev_areq.handle = handle;
+	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&qcedev_areq.complete);
+	pstat = &_qcedev_stat;
+
+	switch (cmd) {
+	case QCEDEV_IOCTL_ENC_REQ:
+	case QCEDEV_IOCTL_DEC_REQ:
+		if (copy_from_user(&qcedev_areq.cipher_op_req,
+				(void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
+				podev))
+			return -EINVAL;
+
+		err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
+		if (err)
+			return err;
+		if (copy_to_user((void __user *)arg,
+					&qcedev_areq.cipher_op_req,
+					sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_SHA_INIT_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+		if (err)
+			return err;
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		handle->sha_ctxt.init_done = true;
+		break;
+	case QCEDEV_IOCTL_GET_CMAC_REQ:
+		if (!podev->ce_support.cmac)
+			return -ENOTTY;
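+		/* fall through */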
+	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+			err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
+			if (err)
+				return err;
+		} else {
+			if (handle->sha_ctxt.init_done == false) {
+				pr_err("%s Init was not called\n", __func__);
+				return -EINVAL;
+			}
+			err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+			if (err)
+				return err;
+		}
+
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			return -EINVAL;
+		}
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		break;
+
+	case QCEDEV_IOCTL_SHA_FINAL_REQ:
+
+		if (handle->sha_ctxt.init_done == false) {
+			pr_err("%s Init was not called\n", __func__);
+			return -EINVAL;
+		}
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_final(&qcedev_areq, handle);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		handle->sha_ctxt.init_done = false;
+		break;
+
+	case QCEDEV_IOCTL_GET_SHA_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+		err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+		if (err)
+			return err;
+		err = qcedev_hash_final(&qcedev_areq, handle);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen =	handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
+static int qcedev_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct qcedev_control *podev;
+	struct msm_ce_hw_support *platform_support;
+
+	podev = &qce_dev[0];
+
+	podev->high_bw_req_count = 0;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+
+	spin_lock_init(&podev->lock);
+
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	podev->qce = handle;
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+
+	rc = misc_register(&podev->miscdevice);
+	qce_hw_support(podev->qce, &podev->ce_support);
+	if (podev->ce_support.bam) {
+		podev->platform_support.ce_shared = 0;
+		podev->platform_support.shared_ce_resource = 0;
+		podev->platform_support.hw_key_support =
+						podev->ce_support.hw_key;
+		podev->platform_support.bus_scale_table = NULL;
+		podev->platform_support.sha_hmac = 1;
+
+		podev->platform_support.bus_scale_table =
+			(struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+		if (!podev->platform_support.bus_scale_table)
+			pr_err("bus_scale_table is NULL\n");
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		podev->platform_support.ce_shared = platform_support->ce_shared;
+		podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		podev->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		podev->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+	if (podev->platform_support.bus_scale_table != NULL) {
+		podev->bus_scale_handle =
+			msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+				podev->platform_support.bus_scale_table);
+		if (!podev->bus_scale_handle) {
+			pr_err("%s not able to get bus scale\n",
+				__func__);
+			rc =  -ENOMEM;
+			goto err;
+		}
+	}
+
+	if (rc >= 0)
+		return 0;
+
+	if (podev->platform_support.bus_scale_table != NULL)
+		msm_bus_scale_unregister_client(podev->bus_scale_handle);
+err:
+
+	if (handle)
+		qce_close(handle);
+	platform_set_drvdata(pdev, NULL);
+	podev->qce = NULL;
+	podev->pdev = NULL;
+	return rc;
+}
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+	if (podev->qce)
+		qce_close(podev->qce);
+
+	if (podev->platform_support.bus_scale_table != NULL)
+		msm_bus_scale_unregister_client(podev->bus_scale_handle);
+
+	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&podev->miscdevice);
+	tasklet_kill(&podev->done_tasklet);
+	return 0;
+}
+
+static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev || !podev->platform_support.bus_scale_table)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set to low bandwidth\n",
+						__func__);
+			goto suspend_exit;
+		}
+		ret = qce_disable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable disable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+			if (ret)
+				pr_err("%s Unable to set to high bandwidth\n",
+					__func__);
+			goto suspend_exit;
+		}
+	}
+
+suspend_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static int qcedev_resume(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev || !podev->platform_support.bus_scale_table)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = qce_enable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			goto resume_exit;
+		}
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			ret = qce_disable_clk(podev->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n",
+					__func__);
+			goto resume_exit;
+		}
+	}
+
+resume_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static const struct of_device_id qcedev_match[] = {
+	{	.compatible = "qcom,qcedev",
+	},
+	{}
+};
+
+static struct platform_driver qcedev_plat_driver = {
+	.probe = qcedev_probe,
+	.remove = qcedev_remove,
+	.suspend = qcedev_suspend,
+	.resume = qcedev_resume,
+	.driver = {
+		.name = "qce",
+		.owner = THIS_MODULE,
+		.of_match_table = qcedev_match,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcedev_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcedev_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI QCE dev driver %d Statistics:\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation success       : %d\n",
+					pstat->qcedev_enc_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation fail   : %d\n",
+					pstat->qcedev_enc_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation success     : %d\n",
+					pstat->qcedev_dec_success);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation fail          : %d\n",
+					pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	ssize_t rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcedev = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+			&_debug_qcedev, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcedev debugfs_create_file fail, error %ld\n",
+				PTR_ERR(dent));
+		rc = PTR_ERR(dent);
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	int rc;
+
+	rc = _qcedev_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DEV Crypto driver");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcedevi.h b/drivers/crypto/msm/qcedevi.h
new file mode 100644
index 0000000..c26ed71
--- /dev/null
+++ b/drivers/crypto/msm/qcedevi.h
@@ -0,0 +1,125 @@
+/* QTI crypto Driver
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEVI_H
+#define __CRYPTO_MSM_QCEDEVI_H
+
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/fips_status.h>
+#include "qce.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+enum qcedev_crypto_oper_type {
+	QCEDEV_CRYPTO_OPER_CIPHER = 0,
+	QCEDEV_CRYPTO_OPER_SHA = 1,
+	QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_handle;
+
+struct qcedev_cipher_req {
+	struct ablkcipher_request creq;
+	void *cookie;
+};
+
+struct qcedev_sha_req {
+	struct ahash_request sreq;
+	void *cookie;
+};
+
+struct	qcedev_sha_ctxt {
+	uint32_t	auth_data[4];
+	uint8_t	digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t	diglen;
+	uint8_t	trailing_buf[64];
+	uint32_t	trailing_buf_len;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+	bool		init_done;
+};
+
+struct qcedev_async_req {
+	struct list_head			list;
+	struct completion			complete;
+	enum qcedev_crypto_oper_type		op_type;
+	union {
+		struct qcedev_cipher_op_req	cipher_op_req;
+		struct qcedev_sha_op_req	sha_op_req;
+	};
+
+	union {
+		struct qcedev_cipher_req	cipher_req;
+		struct qcedev_sha_req		sha_req;
+	};
+	struct qcedev_handle			*handle;
+	int					err;
+};
+
+/**********************************************************************
+ * Register ourselves as a misc device to be able to access the dev driver
+ * from userspace.
+ */
+
+#define QCEDEV_DEV	"qcedev"
+
+struct qcedev_control {
+
+	/* CE features supported by platform */
+	struct msm_ce_hw_support platform_support;
+
+	uint32_t ce_lock_count;
+	uint32_t high_bw_req_count;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	uint32_t  bus_scale_handle;
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned int magic;
+
+	struct list_head ready_commands;
+	struct qcedev_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+struct qcedev_handle {
+	/* qcedev control handle */
+	struct qcedev_control *cntl;
+	/* qce internal sha context*/
+	struct qcedev_sha_ctxt sha_ctxt;
+};
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret);
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret);
+
+#endif  /* __CRYPTO_MSM_QCEDEVI_H */
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
new file mode 100644
index 0000000..f184ee1
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto.c
@@ -0,0 +1,5515 @@
+/*
+ * QTI Crypto driver
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/hardirq.h>
+#include <linux/qcrypto.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+
+#include <linux/fips_status.h>
+
+#include "qce.h"
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 4096
+#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */
+
+/*
+ * For crypto 5.0 which has burst size alignment requirement.
+ */
+#define MAX_ALIGN_SIZE  0x40
+
+#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
+
+
+
+/* Status of response workq */
+enum resp_workq_sts {
+	NOT_SCHEDULED  = 0,
+	IS_SCHEDULED   = 1,
+	SCHEDULE_AGAIN = 2
+};
+
+/* Status of req processing by CEs */
+enum req_processing_sts {
+	STOPPED     = 0,
+	IN_PROGRESS = 1
+};
+
+enum qcrypto_bus_state {
+	BUS_NO_BANDWIDTH = 0,
+	BUS_HAS_BANDWIDTH,
+	BUS_BANDWIDTH_RELEASING,
+	BUS_BANDWIDTH_ALLOCATING,
+	BUS_SUSPENDED,
+	BUS_SUSPENDING,
+};
+
+struct crypto_stat {
+	u64 aead_sha1_aes_enc;
+	u64 aead_sha1_aes_dec;
+	u64 aead_sha1_des_enc;
+	u64 aead_sha1_des_dec;
+	u64 aead_sha1_3des_enc;
+	u64 aead_sha1_3des_dec;
+	u64 aead_sha256_aes_enc;
+	u64 aead_sha256_aes_dec;
+	u64 aead_sha256_des_enc;
+	u64 aead_sha256_des_dec;
+	u64 aead_sha256_3des_enc;
+	u64 aead_sha256_3des_dec;
+	u64 aead_ccm_aes_enc;
+	u64 aead_ccm_aes_dec;
+	u64 aead_rfc4309_ccm_aes_enc;
+	u64 aead_rfc4309_ccm_aes_dec;
+	u64 aead_op_success;
+	u64 aead_op_fail;
+	u64 aead_bad_msg;
+	u64 ablk_cipher_aes_enc;
+	u64 ablk_cipher_aes_dec;
+	u64 ablk_cipher_des_enc;
+	u64 ablk_cipher_des_dec;
+	u64 ablk_cipher_3des_enc;
+	u64 ablk_cipher_3des_dec;
+	u64 ablk_cipher_op_success;
+	u64 ablk_cipher_op_fail;
+	u64 sha1_digest;
+	u64 sha256_digest;
+	u64 sha1_hmac_digest;
+	u64 sha256_hmac_digest;
+	u64 ahash_op_success;
+	u64 ahash_op_fail;
+};
+static struct crypto_stat _qcrypto_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static bool _qcrypto_init_assign;
+struct crypto_priv;
+struct qcrypto_req_control {
+	unsigned int index;
+	bool in_use;
+	struct crypto_engine *pce;
+	struct crypto_async_request *req;
+	struct qcrypto_resp_ctx *arsp;
+	int res; /* execution result */
+};
+
+struct crypto_engine {
+	struct list_head elist;
+	void *qce; /* qce handle */
+	struct platform_device *pdev; /* platform device */
+	struct crypto_priv *pcp;
+	uint32_t  bus_scale_handle;
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that have this engine assigned
+					 * waiting to be executed
+					 */
+	u64 total_req;
+	u64 err_req;
+	u32 unit;
+	u32 ce_device;
+	u32 ce_hw_instance;
+	unsigned int signature;
+
+	enum qcrypto_bus_state bw_state;
+	bool   high_bw_req;
+	struct timer_list bw_reaper_timer;
+	struct work_struct bw_reaper_ws;
+	struct work_struct bw_allocate_ws;
+
+	/* engine execution sequence number */
+	u32    active_seq;
+	/* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
+	u32    last_active_seq;
+
+	bool   check_flag;
+	/*Added to support multi-requests*/
+	unsigned int max_req;
+	struct   qcrypto_req_control *preq_pool;
+	atomic_t req_count;
+	bool issue_req;		/* a request is being issued to qce */
+	bool first_engine;	/* this engine is the first engine or not */
+	unsigned int irq_cpu;	/* the cpu running the irq of this engine */
+	unsigned int max_req_used; /* debug stats */
+};
+
+#define MAX_SMP_CPU    8
+
+struct crypto_priv {
+	/* CE features supported by target device*/
+	struct msm_ce_hw_support platform_support;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	/* the lock protects crypto queue and req */
+	spinlock_t lock;
+
+	/* list of registered algorithms */
+	struct list_head alg_list;
+
+	/* current active request */
+	struct crypto_async_request *req;
+
+	struct work_struct unlock_ce_ws;
+	struct list_head engine_list; /* list of qcrypto engines */
+	int32_t total_units;   /* total units of engines */
+	struct mutex engine_lock;
+
+	struct crypto_engine *next_engine; /* next assign engine */
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that waiting for an available
+					 * engine.
+					 */
+	struct llist_head ordered_resp_list;	/* Queue to maintain
+						 * responses in sequence.
+						 */
+	atomic_t resp_cnt;
+	struct workqueue_struct *resp_wq;
+	struct work_struct resp_work;	/*
+					 * Workq to send responses
+					 * in sequence.
+					 */
+	enum resp_workq_sts sched_resp_workq_status;
+	enum req_processing_sts ce_req_proc_sts;
+	int cpu_getting_irqs_frm_first_ce;
+	struct crypto_engine *first_engine;
+	struct crypto_engine *scheduled_eng; /* last engine scheduled */
+
+	/* debug stats */
+	unsigned int no_avail;
+	unsigned int resp_stop;
+	unsigned int resp_start;
+	unsigned int max_qlen;
+	unsigned int queue_work_eng3;
+	unsigned int queue_work_not_eng3;
+	unsigned int queue_work_not_eng3_nz;
+	unsigned int max_resp_qlen;
+	unsigned int max_reorder_cnt;
+	unsigned int cpu_req[MAX_SMP_CPU+1];
+};
+static struct crypto_priv qcrypto_dev;
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp);
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp);
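+/*
+ * Claim a free slot from the engine's preallocated request pool.  The
+ * xchg() on in_use makes the claim atomic without holding a lock;
+ * max_req_used tracks the pool's high-water mark for the debug stats.
+ */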
+static struct qcrypto_req_control *qcrypto_alloc_req_control(
+						struct crypto_engine *pce)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+	unsigned int req_count;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (xchg(&pqcrypto_req_control->in_use, true) == false) {
+			req_count = atomic_inc_return(&pce->req_count);
+			if (req_count > pce->max_req_used)
+				pce->max_req_used = req_count;
+			return pqcrypto_req_control;
+		}
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
+
+static void qcrypto_free_req_control(struct crypto_engine *pce,
+					struct qcrypto_req_control *preq)
+{
+	/* do this before free req */
+	preq->req = NULL;
+	preq->arsp = NULL;
+	/* free req */
+	if (xchg(&preq->in_use, false) == false)
+		pr_warn("request info %p free already\n", preq);
+	else
+		atomic_dec(&pce->req_count);
+}
+
+static struct qcrypto_req_control *find_req_control_for_areq(
+					struct crypto_engine *pce,
+					struct crypto_async_request *areq)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (pqcrypto_req_control->req == areq)
+			return pqcrypto_req_control;
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
+
+static void qcrypto_init_req_control(struct crypto_engine *pce,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	int i;
+
+	pce->preq_pool = pqcrypto_req_control;
+	atomic_set(&pce->req_count, 0);
+	for (i = 0; i < pce->max_req; i++) {
+		pqcrypto_req_control->index = i;
+		pqcrypto_req_control->in_use = false;
+		pqcrypto_req_control->pce = pce;
+		pqcrypto_req_control++;
+	}
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
+			 unsigned int device)
+{
+	struct crypto_engine *entry = NULL;
+	struct crypto_engine *pe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		if (pe->ce_device == device) {
+			entry = pe;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (!entry) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n",
+				device);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device_hw
+			(struct crypto_priv *cp,
+			u32 device,
+			u32 hw_instance)
+{
+	struct crypto_engine *entry = NULL;
+	struct crypto_engine *pe;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		if ((pe->ce_device == device) &&
+			(pe->ce_hw_instance == hw_instance)) {
+			entry = pe;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (!entry) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n",
+						 device);
+		return NULL;
+	}
+	return entry;
+}
+
+int qcrypto_get_num_engines(void)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	int count = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		count++;
+	}
+	return count;
+}
+EXPORT_SYMBOL(qcrypto_get_num_engines);
+
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	size_t arr_index = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		arr[arr_index].ce_device = entry->ce_device;
+		arr[arr_index].hw_instance = entry->ce_hw_instance;
+		arr_index++;
+		if (arr_index >= num_engines)
+			break;
+	}
+}
+EXPORT_SYMBOL(qcrypto_get_engine_list);
+
+enum qcrypto_alg_type {
+	QCRYPTO_ALG_CIPHER	= 0,
+	QCRYPTO_ALG_SHA	= 1,
+	QCRYPTO_ALG_AEAD = 2,
+	QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+	struct list_head entry;
+	struct crypto_alg cipher_alg;
+	struct ahash_alg sha_alg;
+	struct aead_alg aead_alg;
+	enum qcrypto_alg_type alg_type;
+	struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE	64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH	16
+
+#define	QCRYPTO_CCM4309_NONCE_LEN	3
+
+struct qcrypto_cipher_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+
+	enum qce_hash_alg_enum  auth_alg; /* for aead */
+	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+	u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+	unsigned int enc_key_len;
+
+	unsigned int authsize;
+	unsigned int auth_key_len;
+
+	u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
+
+	struct crypto_skcipher *cipher_aes192_fb;
+
+	struct crypto_ahash *ahash_aead_aes192_fb;
+};
+
+struct qcrypto_resp_ctx {
+	struct list_head list;
+	struct llist_node llist;
+	struct crypto_async_request *async_req; /* async req */
+	int res;                                /* execution result */
+};
+
+struct qcrypto_cipher_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+	u8 *iv;
+	u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
+	unsigned int ivsize;
+	int  aead;
+	struct scatterlist asg;		/* Formatted associated data sg  */
+	unsigned char *adata;		/* Pointer to formatted assoc data */
+	enum qce_cipher_alg_enum alg;
+	enum qce_cipher_dir_enum dir;
+	enum qce_cipher_mode_enum mode;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist *orig_dst;	/* Original dst sg ptr  */
+	struct scatterlist dsg;		/* Dest Data sg  */
+	struct scatterlist ssg;		/* Source Data sg  */
+	unsigned char *data;		/* Incoming data pointer */
+
+	struct aead_request *aead_req;
+	struct ahash_request *fb_hash_req;
+	uint8_t	fb_ahash_digest[SHA256_DIGEST_SIZE];
+	struct scatterlist fb_ablkcipher_src_sg[2];
+	struct scatterlist fb_ablkcipher_dst_sg[2];
+	char *fb_aes_iv;
+	unsigned int  fb_ahash_length;
+	struct skcipher_request *fb_aes_req;
+	struct scatterlist *fb_aes_src;
+	struct scatterlist *fb_aes_dst;
+	unsigned int  fb_aes_cryptlen;
+};
+
+#define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE	 SHA256_DIGEST_SIZE
+
+#define	MSM_QCRYPTO_REQ_QUEUE_LENGTH 768
+#define	COMPLETION_CB_BACKLOG_LENGTH_STOP 400
+#define	COMPLETION_CB_BACKLOG_LENGTH_START \
+			(COMPLETION_CB_BACKLOG_LENGTH_STOP / 2)
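+
+/*
+ * Completion-callback flow control: when the number of responses waiting
+ * to be delivered (resp_cnt) reaches the STOP threshold above,
+ * ce_req_proc_sts is moved to STOPPED and no new requests are issued;
+ * once the backlog drains below the START threshold (half of STOP),
+ * processing resumes. See _qcrypto_tfm_complete() and seq_response().
+ */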
+
+/* standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha1_uint8[] = {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+	enum qce_hash_alg_enum  alg;
+	uint32_t		diglen;
+	uint32_t		authkey_in_len;
+	uint8_t			authkey[SHA_MAX_BLOCK_SIZE];
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+};
+
+struct qcrypto_sha_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+
+	struct scatterlist *src;
+	uint32_t nbytes;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist dsg;		/* Data sg */
+	unsigned char *data;		/* Incoming data pointer */
+	unsigned char *data2;		/* Updated data pointer */
+
+	uint32_t byte_count[4];
+	u64 count;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	 trailing_buf[SHA_MAX_BLOCK_SIZE];
+	uint32_t trailing_buf_len;
+
+	/* DMA buffer, internal use */
+	uint8_t	staging_dmabuf
+		[SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
+
+	uint8_t	digest[SHA_MAX_DIGEST_SIZE];
+	struct scatterlist sg[2];
+};
+
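+/*
+ * Helpers to convert between byte streams and the big-endian 32-bit
+ * words the crypto engine uses for digests and byte counts. For
+ * example, the bytes { 0x01, 0x02, 0x03, 0x04 } map to the word
+ * 0x01020304; a trailing partial word is packed into the high-order
+ * bytes of the last word.
+ */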
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n;
+
+	n = len  / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
+static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
+				 bool high_bw_req)
+{
+	int ret = 0;
+
+	if (high_bw_req) {
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			goto clk_err;
+		}
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			qce_disable_clk(pengine->qce);
+			goto clk_err;
+		}
+	} else {
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set to low bandwidth\n",
+						__func__);
+			goto clk_err;
+		}
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable disable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+			if (ret)
+				pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			goto clk_err;
+		}
+	}
+clk_err:
+	return;
+
+}
+
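+/*
+ * Bus bandwidth lifecycle: an engine votes for high bandwidth (and
+ * enables its clocks) before issuing requests, and the periodic reaper
+ * timer drops the vote once the engine has been idle (active_seq
+ * unchanged) for a QCRYPTO_HIGH_BANDWIDTH_TIMEOUT interval. bw_state
+ * moves BUS_NO_BANDWIDTH -> BUS_BANDWIDTH_ALLOCATING ->
+ * BUS_HAS_BANDWIDTH -> BUS_BANDWIDTH_RELEASING -> BUS_NO_BANDWIDTH;
+ * requests arriving mid-transition stay queued (see _qcrypto_queue_req()).
+ */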
+static void qcrypto_bw_reaper_timer_callback(unsigned long data)
+{
+	struct crypto_engine *pengine = (struct crypto_engine *)data;
+
+	schedule_work(&pengine->bw_reaper_ws);
+}
+
+static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
+{
+	pengine->bw_reaper_timer.data =
+			(unsigned long)(pengine);
+	pengine->bw_reaper_timer.expires = jiffies +
+			msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
+	mod_timer(&(pengine->bw_reaper_timer),
+		pengine->bw_reaper_timer.expires);
+}
+
+static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
+{
+	schedule_work(&pengine->bw_allocate_ws);
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+					struct crypto_engine *pengine);
+
+static void qcrypto_bw_allocate_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_allocate_ws);
+	unsigned long flags;
+	struct crypto_priv *cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qcrypto_ce_set_bus(pengine, true);
+	qcrypto_bw_set_timeout(pengine);
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_HAS_BANDWIDTH;
+	pengine->high_bw_req = false;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	_start_qcrypto_process(cp, pengine);
+}
+
+static void qcrypto_bw_reaper_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_reaper_ws);
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	u32    active_seq;
+	bool restart = false;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	active_seq = pengine->active_seq;
+	if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
+		(active_seq == pengine->last_active_seq)) {
+
+		/* check if engine is stuck */
+		if (atomic_read(&pengine->req_count) > 0) {
+			if (pengine->check_flag)
+				dev_warn(&pengine->pdev->dev,
+				"The engine appears to be stuck seq %d.\n",
+				active_seq);
+			pengine->check_flag = false;
+			goto ret;
+		}
+		if (cp->platform_support.bus_scale_table == NULL)
+			goto ret;
+		pengine->bw_state = BUS_BANDWIDTH_RELEASING;
+		spin_unlock_irqrestore(&cp->lock, flags);
+
+		qcrypto_ce_set_bus(pengine, false);
+
+		spin_lock_irqsave(&cp->lock, flags);
+
+		if (pengine->high_bw_req == true) {
+			/* we got a request while disabling the clock */
+			pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+
+			qcrypto_ce_set_bus(pengine, true);
+
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_HAS_BANDWIDTH;
+			pengine->high_bw_req = false;
+			restart = true;
+		} else
+			pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+ret:
+	pengine->last_active_seq = active_seq;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (restart)
+		_start_qcrypto_process(cp, pengine);
+	if (pengine->bw_state != BUS_NO_BANDWIDTH)
+		qcrypto_bw_set_timeout(pengine);
+}
+
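+/*
+ * Scatterlist helpers: qcrypto_count_sg() returns how many sg entries
+ * cover nbytes, and the copy helpers move a linear buffer to/from a
+ * scatterlist one entry at a time, returning the number of bytes
+ * actually copied so callers can detect short copies.
+ */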
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0 && sg != NULL; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+
+	return i;
+}
+
+static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_from_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_to_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+		struct ahash_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_SHA;
+	q_alg->sha_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+		struct crypto_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+	q_alg->cipher_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp,
+		struct aead_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_AEAD;
+	q_alg->aead_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static int _qcrypto_cipher_ctx_init(struct qcrypto_cipher_ctx *ctx,
+					struct qcrypto_alg *q_alg)
+{
+	if (!ctx || !q_alg) {
+		pr_err("ctx or q_alg is NULL\n");
+		return -EINVAL;
+	}
+	ctx->flags = 0;
+	/* update context with ptr to cp */
+	ctx->cp = q_alg->cp;
+	/* random first IV */
+	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+	if (_qcrypto_init_assign) {
+		ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+		if (ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		ctx->pengine = NULL;
+	INIT_LIST_HEAD(&ctx->rsp_queue);
+	ctx->auth_alg = QCE_HASH_LAST;
+	return 0;
+}
+
+static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg =	container_of(crypto_hash_alg_common(ahash),
+						struct ahash_alg, halg);
+	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+								sha_alg);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+	/* update context with ptr to cp */
+	sha_ctx->cp = q_alg->cp;
+	sha_ctx->flags = 0;
+	sha_ctx->ahash_req = NULL;
+	if (_qcrypto_init_assign) {
+		sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+		if (sha_ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		sha_ctx->pengine = NULL;
+	INIT_LIST_HEAD(&sha_ctx->rsp_queue);
+	return 0;
+}
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+	if (!list_empty(&sha_ctx->rsp_queue))
+		pr_err("_qcrypto_ahash_cra_exit: requests still outstanding");
+	if (sha_ctx->ahash_req != NULL) {
+		ahash_request_free(sha_ctx->ahash_req);
+		sha_ctx->ahash_req = NULL;
+	}
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err);
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	int ret = 0;
+
+	ret = _qcrypto_ahash_cra_init(tfm);
+	if (ret)
+		return ret;
+	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+	if (sha_ctx->ahash_req == NULL) {
+		_qcrypto_ahash_cra_exit(tfm);
+		return -ENOMEM;
+	}
+
+	init_completion(&sha_ctx->ahash_req_complete);
+	ahash_request_set_callback(sha_ctx->ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&sha_ctx->ahash_req_complete);
+	crypto_ahash_clear_flags(ahash, ~0);
+
+	return 0;
+}
+
+static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+	return _qcrypto_cipher_cra_init(tfm);
+}
+
+static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+		ctx->cipher_aes192_fb = NULL;
+		return _qcrypto_cra_ablkcipher_init(tfm);
+	}
+	ctx->cipher_aes192_fb = crypto_alloc_skcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->cipher_aes192_fb)) {
+		pr_err("Error allocating fallback algo %s\n", name);
+		ret = PTR_ERR(ctx->cipher_aes192_fb);
+		ctx->cipher_aes192_fb = NULL;
+		return ret;
+	}
+	return _qcrypto_cra_ablkcipher_init(tfm);
+}
+
+static int _qcrypto_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *aeadalg = crypto_aead_alg(tfm);
+	struct qcrypto_alg *q_alg = container_of(aeadalg, struct qcrypto_alg,
+						aead_alg);
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_ccm_init(struct  crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg =  QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_rfc4309_ccm_init(struct  crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg =  QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_skcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha1)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_skcipher(ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return 0;
+}
+
+static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_skcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha256)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_skcipher(ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return 0;
+}
+
+static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto__cra_ablkcipher_exit: requests still outstanding");
+}
+
+static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	_qcrypto_cra_ablkcipher_exit(tfm);
+	if (ctx->cipher_aes192_fb)
+		crypto_free_skcipher(ctx->cipher_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+}
+
+static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
+}
+
+static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("_qcrypto__cra_aead_exit: requests still outstanding");
+	if (ctx->cipher_aes192_fb)
+		crypto_free_skcipher(ctx->cipher_aes192_fb);
+	if (ctx->ahash_aead_aes192_fb)
+		crypto_free_ahash(ctx->ahash_aead_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+}
+
+static int _disp_stats(int id)
+{
+	struct crypto_stat *pstat;
+	int len = 0;
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	pstat = &_qcrypto_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI crypto accelerator %d Statistics\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER AES encryption          : %llu\n",
+					pstat->ablk_cipher_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER AES decryption          : %llu\n",
+					pstat->ablk_cipher_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER DES encryption          : %llu\n",
+					pstat->ablk_cipher_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER DES decryption          : %llu\n",
+					pstat->ablk_cipher_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER 3DES encryption         : %llu\n",
+					pstat->ablk_cipher_3des_enc);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER 3DES decryption         : %llu\n",
+					pstat->ablk_cipher_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation success       : %llu\n",
+					pstat->ablk_cipher_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation fail          : %llu\n",
+					pstat->ablk_cipher_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES encryption            : %llu\n",
+					pstat->aead_sha1_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES decryption            : %llu\n",
+					pstat->aead_sha1_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES encryption            : %llu\n",
+					pstat->aead_sha1_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES decryption            : %llu\n",
+					pstat->aead_sha1_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES encryption           : %llu\n",
+					pstat->aead_sha1_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES decryption           : %llu\n",
+					pstat->aead_sha1_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES encryption          : %llu\n",
+					pstat->aead_sha256_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES decryption          : %llu\n",
+					pstat->aead_sha256_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES encryption          : %llu\n",
+					pstat->aead_sha256_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES decryption          : %llu\n",
+					pstat->aead_sha256_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES encryption         : %llu\n",
+					pstat->aead_sha256_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES decryption         : %llu\n",
+					pstat->aead_sha256_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES encryption             : %llu\n",
+					pstat->aead_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES decryption             : %llu\n",
+					pstat->aead_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES encryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES decryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation success              : %llu\n",
+					pstat->aead_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation fail                 : %llu\n",
+					pstat->aead_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD bad message                    : %llu\n",
+					pstat->aead_bad_msg);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 digest                   : %llu\n",
+					pstat->sha1_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 digest                 : %llu\n",
+					pstat->sha256_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 HMAC digest              : %llu\n",
+					pstat->sha1_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 HMAC digest            : %llu\n",
+					pstat->sha256_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation success             : %llu\n",
+					pstat->ahash_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation fail                : %llu\n",
+					pstat->ahash_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   resp start, resp stop, max rsp queue reorder-cnt : %u %u %u %u\n",
+					cp->resp_start, cp->resp_stop,
+					cp->max_resp_qlen, cp->max_reorder_cnt);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   max queue legnth, no avail          : %u %u\n",
+					cp->max_qlen, cp->no_avail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   work queue                          : %u %u %u\n",
+					cp->queue_work_eng3,
+					cp->queue_work_not_eng3,
+					cp->queue_work_not_eng3_nz);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req max %d          : %llu\n",
+			pe->unit,
+			pe->max_req_used,
+			pe->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error               : %llu\n",
+			pe->unit,
+			pe->err_req
+		);
+		qce_get_driver_stats(pe->qce);
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	for (i = 0; i < MAX_SMP_CPU+1; i++)
+		if (cp->cpu_req[i])
+			len += scnprintf(
+				_debug_read_buf + len,
+				DEBUG_MAX_RW_BUF - len - 1,
+				"CPU %d Issue Req                     : %d\n",
+				i, cp->cpu_req[i]);
+	return len;
+}
+
+static void _qcrypto_remove_engine(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_alg *n;
+	unsigned long flags;
+	struct crypto_engine *pe;
+
+	cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_del(&pengine->elist);
+	if (pengine->first_engine) {
+		cp->first_engine = NULL;
+		pe = list_first_entry_or_null(&cp->engine_list,
+						struct crypto_engine, elist);
+		if (pe) {
+			pe->first_engine = true;
+			cp->first_engine = pe;
+		}
+	}
+	if (cp->next_engine == pengine)
+		cp->next_engine = NULL;
+	if (cp->scheduled_eng == pengine)
+		cp->scheduled_eng = NULL;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	cp->total_units--;
+
+	cancel_work_sync(&pengine->bw_reaper_ws);
+	cancel_work_sync(&pengine->bw_allocate_ws);
+	del_timer_sync(&pengine->bw_reaper_timer);
+
+	if (pengine->bus_scale_handle != 0)
+		msm_bus_scale_unregister_client(pengine->bus_scale_handle);
+	pengine->bus_scale_handle = 0;
+
+	kzfree(pengine->preq_pool);
+
+	if (cp->total_units)
+		return;
+
+	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+			crypto_unregister_alg(&q_alg->cipher_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+			crypto_unregister_ahash(&q_alg->sha_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_AEAD)
+			crypto_unregister_aead(&q_alg->aead_alg);
+		list_del(&q_alg->entry);
+		kzfree(q_alg);
+	}
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return 0;
+	cp = pengine->pcp;
+	mutex_lock(&cp->engine_lock);
+	_qcrypto_remove_engine(pengine);
+	mutex_unlock(&cp->engine_lock);
+	if (pengine->qce)
+		qce_close(pengine->qce);
+	kzfree(pengine);
+	return 0;
+}
+
+static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
+		struct crypto_priv *cp, unsigned int len)
+{
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
+		/* fall through - 192-bit keys unsupported by this CE */
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher,
+		const u8 *key)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	ctx->enc_key_len = AES_KEYSIZE_192;
+	ctx->cipher_aes192_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	ctx->cipher_aes192_fb->base.crt_flags |=
+			(cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+	ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb, key,
+			AES_KEYSIZE_192);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |=
+			(cipher->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+
+	if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
+					&& ctx->cipher_aes192_fb)
+		return _qcrypto_setkey_aes_192_fallback(cipher, key);
+
+	if (_qcrypto_check_aes_keylen(cipher, cp, len))
+		return -EINVAL;
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))  {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Inavlid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_xts(struct crypto_ablkcipher *cipher,
+		const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+	if (_qcrypto_check_aes_keylen(cipher, cp, len/2))
+		return -EINVAL;
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))  {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Inavlid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (!key) {
+		pr_err("%s Inavlid key pointer\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = des_ekey(tmp, key);
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for DES algorithm\n",
+								__func__);
+		return 0;
+	}
+
+	if (len != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))
+		memcpy(ctx->enc_key, key, len);
+
+	return 0;
+}
+
+static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for 3DES algorithm\n",
+								__func__);
+		return 0;
+	}
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Inavlid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
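+/*
+ * seq_response() drains cp->ordered_resp_list and invokes the completion
+ * callbacks. llist_del_all() returns the nodes in LIFO order, so the
+ * list is reversed first to restore submission order. The
+ * sched_resp_workq_status handshake (NOT_SCHEDULED / IS_SCHEDULED /
+ * SCHEDULE_AGAIN) lets producers request another pass without queueing
+ * the work item twice.
+ */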
+static void seq_response(struct work_struct *work)
+{
+	struct crypto_priv *cp = container_of(work, struct crypto_priv,
+							 resp_work);
+	struct llist_node *list;
+	struct llist_node *rev = NULL;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	int total_unit;
+
+again:
+	list = llist_del_all(&cp->ordered_resp_list);
+
+	if (!list)
+		goto end;
+
+	while (list) {
+		struct llist_node *t = list;
+
+		list = llist_next(list);
+		t->next = rev;
+		rev = t;
+	}
+
+	while (rev) {
+		struct qcrypto_resp_ctx *arsp;
+		struct crypto_async_request *areq;
+
+		arsp = container_of(rev, struct qcrypto_resp_ctx, llist);
+		rev = llist_next(rev);
+
+		areq = arsp->async_req;
+		local_bh_disable();
+		areq->complete(areq, arsp->res);
+		local_bh_enable();
+		atomic_dec(&cp->resp_cnt);
+	}
+
+	if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START &&
+		(cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS)
+						== STOPPED)) {
+		cp->resp_start++;
+		for (total_unit = cp->total_units; total_unit-- > 0;) {
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine = _avail_eng(cp);
+			spin_unlock_irqrestore(&cp->lock, flags);
+			if (pengine)
+				_start_qcrypto_process(cp, pengine);
+			else
+				break;
+		}
+	}
+end:
+	if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN,
+				IS_SCHEDULED) == SCHEDULE_AGAIN)
+		goto again;
+	else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+				NOT_SCHEDULED) == SCHEDULE_AGAIN)
+		goto end;
+}
+
+#define SCHEDULE_RSP_QLEN_THRESHOLD 64
+
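+/*
+ * Completions can arrive out of order when a tfm's requests are spread
+ * across engines. Each tfm keeps a rsp_queue in submission order; a
+ * completed entry is moved to the global ordered_resp_list only once
+ * every entry ahead of it has also completed (is no longer
+ * -EINPROGRESS), preserving per-tfm ordering of completion callbacks.
+ */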
+static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type,
+					void *tfm_ctx,
+					struct qcrypto_resp_ctx *cur_arsp,
+					int res)
+{
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	struct qcrypto_resp_ctx *arsp;
+	struct list_head *plist;
+	unsigned int resp_qlen;
+	unsigned int cnt = 0;
+
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	}
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	cur_arsp->res = res;
+	while (!list_empty(plist)) {
+		arsp = list_first_entry(plist,
+				struct qcrypto_resp_ctx, list);
+		if (arsp->res == -EINPROGRESS)
+			break;
+		list_del(&arsp->list);
+		llist_add(&arsp->llist, &cp->ordered_resp_list);
+		atomic_inc(&cp->resp_cnt);
+		cnt++;
+	}
+	resp_qlen = atomic_read(&cp->resp_cnt);
+	if (resp_qlen > cp->max_resp_qlen)
+		cp->max_resp_qlen = resp_qlen;
+	if (cnt > cp->max_reorder_cnt)
+		cp->max_reorder_cnt = cnt;
+	if ((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) &&
+		cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS,
+						STOPPED) == IN_PROGRESS) {
+		cp->resp_stop++;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+retry:
+	if (!llist_empty(&cp->ordered_resp_list)) {
+		unsigned int cpu;
+
+		if (pengine->first_engine) {
+			cpu = WORK_CPU_UNBOUND;
+			cp->queue_work_eng3++;
+		} else {
+			cp->queue_work_not_eng3++;
+			cpu = cp->cpu_getting_irqs_frm_first_ce;
+			/*
+			 * If the source is not the first engine and there
+			 * are outstanding requests on the first engine,
+			 * skip scheduling the work queue in anticipation
+			 * of more completions. If the response queue length
+			 * exceeds the threshold, schedule the work queue
+			 * immediately to avoid further delay.
+			 */
+			if (cp->first_engine && atomic_read(
+						&cp->first_engine->req_count)) {
+				if (resp_qlen < SCHEUDLE_RSP_QLEN_THRESHOLD)
+					return;
+				cp->queue_work_not_eng3_nz++;
+			}
+		}
+		if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED,
+					IS_SCHEDULED) == NOT_SCHEDULED)
+			queue_work_on(cpu, cp->resp_wq, &cp->resp_work);
+		else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+					SCHEDULE_AGAIN) == NOT_SCHEDULED)
+			goto retry;
+	}
+}
+
+static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_engine *pengine;
+	struct crypto_async_request *areq;
+	struct crypto_priv *cp;
+	struct qcrypto_resp_ctx *arsp;
+	u32 type = 0;
+	void *tfm_ctx = NULL;
+	unsigned int cpu;
+	int res;
+
+	pengine = pqcrypto_req_control->pce;
+	cp = pengine->pcp;
+	areq = pqcrypto_req_control->req;
+	arsp = pqcrypto_req_control->arsp;
+	res = pqcrypto_req_control->res;
+	qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+	if (areq) {
+		type = crypto_tfm_alg_type(areq->tfm);
+		tfm_ctx = crypto_tfm_ctx(areq->tfm);
+	}
+	cpu = smp_processor_id();
+	pengine->irq_cpu = cpu;
+	if (pengine->first_engine) {
+		if (cpu  != cp->cpu_getting_irqs_frm_first_ce)
+			cp->cpu_getting_irqs_frm_first_ce = cpu;
+	}
+	if (areq)
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res);
+	if (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)
+		_start_qcrypto_process(cp, pengine);
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	struct ahash_request *areq = (struct ahash_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	uint32_t diglen = crypto_ahash_digestsize(ahash);
+	uint32_t *auth32 = (uint32_t *)authdata;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+				areq, ret);
+#endif
+	if (digest) {
+		memcpy(rctx->digest, digest, diglen);
+		if (rctx->last_blk)
+			memcpy(areq->result, digest, diglen);
+	}
+	if (authdata) {
+		rctx->byte_count[0] = auth32[0];
+		rctx->byte_count[1] = auth32[1];
+		rctx->byte_count[2] = auth32[2];
+		rctx->byte_count[3] = auth32[3];
+	}
+	areq->src = rctx->src;
+	areq->nbytes = rctx->nbytes;
+
+	rctx->last_blk = 0;
+	rctx->first_blk = 0;
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->ahash_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->ahash_op_success++;
+	}
+	if (cp->ce_support.aligned_only)  {
+		areq->src = rctx->orig_src;
+		kfree(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
+		unsigned char *iv, int ret)
+{
+	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = ablkcipher_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+				areq, ret);
+#endif
+	if (iv)
+		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->ablk_cipher_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->ablk_cipher_op_success++;
+	}
+
+	if (cp->ce_support.aligned_only)  {
+		struct qcrypto_cipher_req_ctx *rctx;
+		uint32_t num_sg = 0;
+		uint32_t bytes = 0;
+
+		rctx = ablkcipher_request_ctx(areq);
+		areq->src = rctx->orig_src;
+		areq->dst = rctx->orig_dst;
+
+		num_sg = qcrypto_count_sg(areq->dst, areq->nbytes);
+		bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
+			rctx->data, areq->nbytes);
+		if (bytes != areq->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+								areq->nbytes);
+		kzfree(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+				unsigned char *iv, int ret)
+{
+	struct aead_request *areq = (struct aead_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = aead_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+	if (rctx->mode == QCE_MODE_CCM) {
+		kzfree(rctx->adata);
+	} else {
+		uint32_t ivsize = crypto_aead_ivsize(aead);
+
+		if (ret == 0) {
+			if (rctx->dir  == QCE_ENCRYPT) {
+				/* copy the icv to dst */
+				scatterwalk_map_and_copy(icv, areq->dst,
+						areq->cryptlen + areq->assoclen,
+						ctx->authsize, 1);
+
+			} else {
+				unsigned char tmp[SHA256_DIGESTSIZE] = {0};
+
+				/* compare icv from src */
+				scatterwalk_map_and_copy(tmp,
+					areq->src, areq->assoclen +
+					areq->cryptlen - ctx->authsize,
+					ctx->authsize, 0);
+				ret = memcmp(icv, tmp, ctx->authsize);
+				if (ret != 0)
+					ret = -EBADMSG;
+
+			}
+		} else {
+			ret = -ENXIO;
+		}
+
+		if (iv)
+			memcpy(ctx->iv, iv, ivsize);
+	}
+
+	if (ret == (-EBADMSG))
+		pstat->aead_bad_msg++;
+	else if (ret)
+		pstat->aead_op_fail++;
+	else
+		pstat->aead_op_success++;
+
+	pqcrypto_req_control->res = ret;
+	req_done(pqcrypto_req_control);
+}
+
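+/*
+ * Per RFC 3610, the CCM B0 block ends with the message length encoded
+ * big-endian in its last L octets. aead_ccm_set_msg_len() zeroes that
+ * csize-byte tail, stores the 32-bit length in its final bytes, and
+ * fails with -EOVERFLOW if msglen does not fit in csize octets.
+ */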
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
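+/*
+ * Build the CCM flags byte: bits 3-5 encode the MAC length as
+ * (authsize - 2) / 2 (e.g. authsize 16 gives 7), bit 6 is set when
+ * associated data is present, and the low bits (already in iv[0]) hold
+ * L' = L - 1. Hence i = iv[0] + 1 is the number of length octets
+ * written by aead_ccm_set_msg_len() at the end of the 16-byte nonce.
+ */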
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
+{
+	struct aead_request *areq = (struct aead_request *) qreq->areq;
+	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
+
+	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
+	/*
+	 * Format control info per RFC 3610 and
+	 * NIST Special Publication 800-38C
+	 */
+	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+	if (areq->assoclen)
+		qreq->nonce[0] |= 64;
+
+	if (i > MAX_NONCE)
+		return -EINVAL;
+
+	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
+
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+				struct scatterlist *sg, unsigned char *adata)
+{
+	uint32_t len;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	/*
+	 * Add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
+	 */
+	if (alen < 65280) {
+		*(__be16 *)adata = cpu_to_be16(alen);
+		len = 2;
+	} else {
+			if ((alen >= 65280) && (alen <= 0xffffffff)) {
+				*(__be16 *)adata = cpu_to_be16(0xfffe);
+				*(__be32 *)&adata[2] = cpu_to_be32(alen);
+				len = 6;
+		} else {
+				*(__be16 *)adata = cpu_to_be16(0xffff);
+				*(__be32 *)&adata[6] = cpu_to_be32(alen);
+				len = 10;
+		}
+	}
+	adata += len;
+	qreq->assoclen = ALIGN((alen + len), 16);
+
+	num_sg = qcrypto_count_sg(sg, alen);
+	bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen);
+	if (bytes != alen)
+		pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes, alen);
+
+	return 0;
+}
+
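+/*
+ * Dispatch one ablkcipher request to the qce. On targets where the CE
+ * accepts only aligned, contiguous buffers (ce_support.aligned_only),
+ * the source scatterlist is first flattened into a bounce buffer
+ * (rctx->data); the original src/dst pointers are restored and the
+ * result copied back in _qce_ablk_cipher_complete().
+ */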
+static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct ablkcipher_request, base);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->pengine = pengine;
+	tfm = crypto_ablkcipher_reqtfm(req);
+	if (pengine->pcp->ce_support.aligned_only) {
+		uint32_t bytes = 0;
+		uint32_t num_sg = 0;
+
+		rctx->orig_src = req->src;
+		rctx->orig_dst = req->dst;
+		rctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+		if (rctx->data == NULL)
+			return -ENOMEM;
+		num_sg = qcrypto_count_sg(req->src, req->nbytes);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data,
+								req->nbytes);
+		if (bytes != req->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+								req->nbytes);
+		sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+		sg_mark_end(&rctx->dsg);
+		rctx->iv = req->info;
+
+		req->src = &rctx->dsg;
+		req->dst = &rctx->dsg;
+	}
+	qreq.op = QCE_REQ_ABLK_CIPHER;
+	qreq.qce_cb = _qce_ablk_cipher_complete;
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.iv = req->info;
+	qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+	qreq.cryptlen = req->nbytes;
+	qreq.use_pmem = 0;
+	qreq.flags = cipher_ctx->flags;
+
+	if ((cipher_ctx->enc_key_len == 0) &&
+			(pengine->pcp->platform_support.hw_key_support == 0))
+		ret = -EINVAL;
+	else
+		ret =  qce_ablk_cipher_req(pengine->qce, &qreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct ahash_request *req;
+	struct qce_sha_req sreq;
+	struct qcrypto_sha_req_ctx *rctx;
+	struct qcrypto_sha_ctx *sha_ctx;
+	int ret = 0;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req,
+				struct ahash_request, base);
+	rctx = ahash_request_ctx(req);
+	sha_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx->pengine = pengine;
+
+	sreq.qce_cb = _qce_ahash_complete;
+	sreq.digest =  &rctx->digest[0];
+	sreq.src = req->src;
+	sreq.auth_data[0] = rctx->byte_count[0];
+	sreq.auth_data[1] = rctx->byte_count[1];
+	sreq.auth_data[2] = rctx->byte_count[2];
+	sreq.auth_data[3] = rctx->byte_count[3];
+	sreq.first_blk = rctx->first_blk;
+	sreq.last_blk = rctx->last_blk;
+	sreq.size = req->nbytes;
+	sreq.areq = req;
+	sreq.flags = sha_ctx->flags;
+
+	switch (sha_ctx->alg) {
+	case QCE_HASH_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		sreq.alg = QCE_HASH_SHA1_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		sreq.alg = QCE_HASH_SHA256_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n", sha_ctx->alg);
+		/* do not issue an unsupported algorithm to the qce */
+		return -EINVAL;
+	}
+	ret = qce_process_sha_req(pengine->qce, &sreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_aead(struct  crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret = 0;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct aead_request *req;
+	struct crypto_aead *aead;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct aead_request, base);
+	aead = crypto_aead_reqtfm(req);
+	rctx = aead_request_ctx(req);
+	rctx->pengine = pengine;
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	qreq.op = QCE_REQ_AEAD;
+	qreq.qce_cb = _qce_aead_complete;
+
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.iv = rctx->iv;
+
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.authkey = cipher_ctx->auth_key;
+	qreq.authklen = cipher_ctx->auth_key_len;
+	qreq.authsize = crypto_aead_authsize(aead);
+	qreq.auth_alg = cipher_ctx->auth_alg;
+	if (qreq.mode == QCE_MODE_CCM)
+		qreq.ivsize =  AES_BLOCK_SIZE;
+	else
+		qreq.ivsize =  crypto_aead_ivsize(aead);
+	qreq.flags = cipher_ctx->flags;
+
+	if (qreq.mode == QCE_MODE_CCM) {
+		if (qreq.dir == QCE_ENCRYPT)
+			qreq.cryptlen = req->cryptlen;
+		else
+			qreq.cryptlen = req->cryptlen -
+						qreq.authsize;
+		/* Get NONCE */
+		ret = qccrypto_set_aead_ccm_nonce(&qreq);
+		if (ret)
+			return ret;
+
+		if (req->assoclen) {
+			rctx->adata = kzalloc((req->assoclen + 0x64),
+								GFP_ATOMIC);
+			if (!rctx->adata)
+				return -ENOMEM;
+			/* Format associated data */
+			ret = qcrypto_aead_ccm_format_adata(&qreq,
+						req->assoclen,
+						req->src,
+						rctx->adata);
+		} else {
+			qreq.assoclen = 0;
+			rctx->adata = NULL;
+		}
+		if (ret) {
+			kzfree(rctx->adata);
+			return ret;
+		}
+
+		/*
+		 * update req with new formatted associated
+		 * data info
+		 */
+		qreq.asg = &rctx->asg;
+		if (rctx->adata)
+			sg_set_buf(qreq.asg, rctx->adata,
+					qreq.assoclen);
+		sg_mark_end(qreq.asg);
+	}
+	ret =  qce_aead_req(pengine->qce, &qreq);
+
+	return ret;
+}
+
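+/*
+ * When _qcrypto_init_assign is set, each new tfm is pinned to an engine
+ * chosen round-robin at init time via cp->next_engine; otherwise an
+ * engine is picked per request by _avail_eng().
+ */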
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp)
+{
+	struct crypto_engine *pengine;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	if (cp->next_engine)
+		pengine = cp->next_engine;
+	else
+		pengine = list_first_entry(&cp->engine_list,
+				struct crypto_engine, elist);
+
+	if (list_is_last(&pengine->elist, &cp->engine_list))
+		cp->next_engine = list_first_entry(
+			&cp->engine_list, struct crypto_engine, elist);
+	else
+		cp->next_engine = list_next_entry(pengine, elist);
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return pengine;
+}
+
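+/*
+ * Main dispatch loop: pull the next request (engine queue first, then
+ * the driver-wide queue), reserve a request-control slot, append the
+ * response entry to the tfm's rsp_queue, then hand the request to the
+ * qce by algorithm type. On a dispatch error the request is completed
+ * immediately with the error and the loop retries with the next one.
+ */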
+static int _start_qcrypto_process(struct crypto_priv *cp,
+				struct crypto_engine *pengine)
+{
+	struct crypto_async_request *async_req = NULL;
+	struct crypto_async_request *backlog_eng = NULL;
+	struct crypto_async_request *backlog_cp = NULL;
+	unsigned long flags;
+	u32 type;
+	int ret = 0;
+	struct crypto_stat *pstat;
+	void *tfm_ctx;
+	struct qcrypto_cipher_req_ctx *cipher_rctx;
+	struct qcrypto_sha_req_ctx *ahash_rctx;
+	struct ablkcipher_request *ablkcipher_req;
+	struct ahash_request *ahash_req;
+	struct aead_request *aead_req;
+	struct qcrypto_resp_ctx *arsp;
+	struct qcrypto_req_control *pqcrypto_req_control;
+	unsigned int cpu = MAX_SMP_CPU;
+
+	if (READ_ONCE(cp->ce_req_proc_sts) == STOPPED)
+		return 0;
+
+	if (in_interrupt()) {
+		cpu = smp_processor_id();
+		if (cpu >= MAX_SMP_CPU)
+			cpu = MAX_SMP_CPU - 1;
+	} else
+		cpu = MAX_SMP_CPU;
+
+	pstat = &_qcrypto_stat;
+
+again:
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->issue_req ||
+		atomic_read(&pengine->req_count) >= (pengine->max_req)) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	backlog_eng = crypto_get_backlog(&pengine->req_queue);
+
+	/* make sure it is in high bandwidth state */
+	if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* try to get request from request queue of the engine first */
+	async_req = crypto_dequeue_request(&pengine->req_queue);
+	if (!async_req) {
+		/*
+		 * if there is no request on the engine queue,
+		 * try to get one from the driver's request queue
+		 */
+		backlog_cp = crypto_get_backlog(&cp->req_queue);
+		async_req = crypto_dequeue_request(&cp->req_queue);
+		if (!async_req) {
+			spin_unlock_irqrestore(&cp->lock, flags);
+			return 0;
+		}
+	}
+	pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("Allocation of request failed\n");
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* add associated rsp entry to tfm response queue */
+	type = crypto_tfm_alg_type(async_req->tfm);
+	tfm_ctx = crypto_tfm_ctx(async_req->tfm);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		ahash_req = container_of(async_req,
+			struct ahash_request, base);
+		ahash_rctx = ahash_request_ctx(ahash_req);
+		arsp = &ahash_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_sha_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ablkcipher_req = container_of(async_req,
+			struct ablkcipher_request, base);
+		cipher_rctx = ablkcipher_request_ctx(ablkcipher_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		aead_req = container_of(async_req,
+			struct aead_request, base);
+		cipher_rctx = aead_request_ctx(aead_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	}
+
+	arsp->res = -EINPROGRESS;
+	arsp->async_req = async_req;
+	pqcrypto_req_control->pce = pengine;
+	pqcrypto_req_control->req = async_req;
+	pqcrypto_req_control->arsp = arsp;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+
+	pengine->issue_req = true;
+	cp->cpu_req[cpu]++;
+	smp_mb(); /* make it visible */
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (backlog_eng)
+		backlog_eng->complete(backlog_eng, -EINPROGRESS);
+	if (backlog_cp)
+		backlog_cp->complete(backlog_cp, -EINPROGRESS);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ret = _qcrypto_process_ablkcipher(pengine,
+					pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	pengine->issue_req = false;
+	smp_mb(); /* make it visible */
+
+	pengine->total_req++;
+	if (ret) {
+		pengine->err_req++;
+		qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
+			pstat->ablk_cipher_op_fail++;
+		else if (type == CRYPTO_ALG_TYPE_AHASH)
+			pstat->ahash_op_fail++;
+		else
+			pstat->aead_op_fail++;
+
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret);
+		goto again;
+	}
+	return ret;
+}
+
+static inline struct crypto_engine *_next_eng(struct crypto_priv *cp,
+		struct crypto_engine *p)
+{
+	if (p == NULL || list_is_last(&p->elist, &cp->engine_list))
+		p = list_first_entry(&cp->engine_list, struct crypto_engine,
+			elist);
+	else
+		p = list_entry(p->elist.next, struct crypto_engine, elist);
+	return p;
+}
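+
+/*
+ * Round-robin engine pick: resume scanning after the last scheduled
+ * engine and return the first one that is idle and has a free request
+ * slot, or NULL if none is available.
+ */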
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
+{
+	/* call this function with spinlock set */
+	struct crypto_engine *q = NULL;
+	struct crypto_engine *p = cp->scheduled_eng;
+	struct crypto_engine *q1;
+	int eng_cnt = cp->total_units;
+
+	if (unlikely(list_empty(&cp->engine_list))) {
+		pr_err("%s: no valid ce to schedule\n", __func__);
+		return NULL;
+	}
+
+	p = _next_eng(cp, p);
+	q1 = p;
+	while (eng_cnt-- > 0) {
+		if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) {
+			q = p;
+			break;
+		}
+		p = _next_eng(cp, p);
+		if (q1 == p)
+			break;
+	}
+	cp->scheduled_eng = q;
+	return q;
+}
+
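+/*
+ * Queue a request, either on the engine chosen by the caller or on the
+ * driver-wide queue with a round-robin engine pick. Processing starts
+ * right away only when the engine's bus is in BUS_HAS_BANDWIDTH;
+ * otherwise the request is left queued, raising a bandwidth vote first
+ * if none is pending.
+ */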
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+				struct crypto_engine *pengine,
+				struct crypto_async_request *req)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	if (pengine) {
+		ret = crypto_enqueue_request(&pengine->req_queue, req);
+	} else {
+		ret = crypto_enqueue_request(&cp->req_queue, req);
+		pengine = _avail_eng(cp);
+		if (cp->req_queue.qlen > cp->max_qlen)
+			cp->max_qlen = cp->req_queue.qlen;
+	}
+	if (pengine) {
+		switch (pengine->bw_state) {
+		case BUS_NO_BANDWIDTH:
+			if (!pengine->high_bw_req) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+			pengine = NULL;
+			break;
+		case BUS_HAS_BANDWIDTH:
+			break;
+		case BUS_BANDWIDTH_RELEASING:
+			pengine->high_bw_req = true;
+			pengine = NULL;
+			break;
+		case BUS_BANDWIDTH_ALLOCATING:
+			pengine = NULL;
+			break;
+		case BUS_SUSPENDED:
+		case BUS_SUSPENDING:
+		default:
+			pengine = NULL;
+			break;
+		}
+	} else {
+		cp->no_avail++;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (pengine && (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS))
+		_start_qcrypto_process(cp, pengine);
+	return ret;
+}
+
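+/*
+ * Some crypto engines cannot take AES-192 keys; for those, requests are
+ * redirected to the software skcipher fallback tfm kept in the context.
+ */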
+static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int err;
+
+	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_callback(subreq, req->base.flags,
+					NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+					req->nbytes, req->info);
+	err = crypto_skcipher_encrypt(subreq);
+	skcipher_request_zero(subreq);
+	return err;
+}
+
+static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int err;
+
+	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_callback(subreq, req->base.flags,
+					NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+					req->nbytes, req->info);
+	err = crypto_skcipher_decrypt(subreq);
+	skcipher_request_zero(subreq);
+	return err;
+}
+
+static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_XTS;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
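+	/*
+	 * Build the 16-byte CCM counter block per RFC 4309: a flags octet
+	 * (L - 1), the 3-byte salt taken from setkey, and the 8-byte
+	 * per-request IV; the remaining octets hold the block counter.
+	 */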
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* flags = L - 1, with L = 4 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_CTR;
+
+	/* Note: CTR mode has no separate decrypt; it reuses encryption */
+	rctx->dir = QCE_ENCRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_XTS;
+	rctx->dir = QCE_DECRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* flags = L - 1, with L = 4 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+				unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
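+/*
+ * authenc() keys arrive as an rtattr carrying the cipher key length,
+ * followed by the authentication key and then the cipher key; split the
+ * blob into ctx->auth_key and ctx->enc_key.
+ */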
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct rtattr *rta = (struct rtattr *)key;
+	struct crypto_authenc_key_param *param;
+	int ret;
+
+	if (!RTA_OK(rta, keylen))
+		goto badkey;
+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+		goto badkey;
+	if (RTA_PAYLOAD(rta) < sizeof(*param))
+		goto badkey;
+
+	param = RTA_DATA(rta);
+	ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < ctx->enc_key_len)
+		goto badkey;
+
+	ctx->auth_key_len = keylen - ctx->enc_key_len;
+	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+		goto badkey;
+	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+	memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+			ctx->ahash_aead_aes192_fb) {
+		crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0);
+		ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb,
+					ctx->auth_key, ctx->auth_key_len);
+		if (ret)
+			goto badkey;
+		crypto_skcipher_clear_flags(ctx->cipher_aes192_fb, ~0);
+		ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb,
+					ctx->enc_key, ctx->enc_key_len);
+		if (ret)
+			goto badkey;
+	}
+
+	return 0;
+badkey:
+	ctx->enc_key_len = 0;
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+			unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
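+		/* else fall through to reject the key length */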
+	default:
+		ctx->enc_key_len = 0;
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	ctx->auth_key_len = keylen;
+	memcpy(ctx->auth_key, key, keylen);
+
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+				 const u8 *key, unsigned int key_len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+		return -EINVAL;
+	key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+	memcpy(ctx->ccm4309_nonce, key + key_len,  QCRYPTO_CCM4309_NONCE_LEN);
+	ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+	return ret;
+}
+
+static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx,
+								int res)
+{
+	struct aead_request *req;
+	struct crypto_async_request *areq;
+
+	req = rctx->aead_req;
+	areq = &req->base;
+	if (rctx->fb_aes_req)
+		skcipher_request_free(rctx->fb_aes_req);
+	if (rctx->fb_hash_req)
+		ahash_request_free(rctx->fb_hash_req);
+	rctx->fb_aes_req = NULL;
+	rctx->fb_hash_req = NULL;
+	kfree(rctx->fb_aes_iv);
+	areq->complete(areq, res);
+}
+
+static void _aead_aes_fb_stage2_ahash_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+	/* copy icv */
+	if (err == 0)
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct ahash_request *ahash_req;
+
+	ahash_req = rctx->fb_hash_req;
+	ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				 _aead_aes_fb_stage2_ahash_complete, rctx);
+
+	return crypto_ahash_digest(ahash_req);
+}
+
+static void _aead_aes_fb_stage2_decrypt_complete(
+			struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+
+	rctx = base->data;
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_decrypt(
+					struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct skcipher_request *aes_req;
+
+	aes_req = rctx->fb_aes_req;
+	skcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage2_decrypt_complete, rctx);
+	return crypto_skcipher_decrypt(aes_req);
+}
+
+static void _aead_aes_fb_stage1_ahash_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	/* compare icv */
+	if (err == 0) {
+		unsigned char tmp[ctx->authsize];
+
+		scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
+			req->cryptlen - ctx->authsize, ctx->authsize, 0);
+		if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
+			err = -EBADMSG;
+	}
+	if (err) {
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	} else {
+		err = _start_aead_aes_fb_stage2_decrypt(rctx);
+		if (err != -EINPROGRESS && err != -EBUSY)
+			_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	}
+}
+
+static void _aead_aes_fb_stage1_encrypt_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+
+	if (err) {
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+		return;
+	}
+
+	err = _start_aead_aes_fb_stage2_hmac(rctx);
+
+	/* copy icv */
+	if (err == 0) {
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	}
+	if (err != -EINPROGRESS && err != -EBUSY)
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
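+/*
+ * Software fallback for AES-192 authenc modes, run as two chained async
+ * stages: encrypt then HMAC over assoc data and ciphertext (appending
+ * the ICV), or verify the ICV first and then decrypt.
+ */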
+static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
+							bool is_encrypt)
+{
+	int rc = -EINVAL;
+	struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
+	struct skcipher_request *aes_req = NULL;
+	struct ahash_request *ahash_req = NULL;
+	int nbytes;
+	struct scatterlist *src, *dst;
+
+	rctx->fb_aes_iv = NULL;
+	aes_req = skcipher_request_alloc(ctx->cipher_aes192_fb, GFP_KERNEL);
+	if (!aes_req)
+		return -ENOMEM;
+	ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL);
+	if (!ahash_req)
+		goto ret;
+	rctx->fb_aes_req = aes_req;
+	rctx->fb_hash_req = ahash_req;
+	rctx->aead_req = req;
+	/* assoc and iv sit at the beginning of both the src and dst sg lists */
+	src = scatterwalk_ffwd(rctx->fb_ablkcipher_src_sg, req->src,
+				req->assoclen);
+	dst = scatterwalk_ffwd(rctx->fb_ablkcipher_dst_sg, req->dst,
+				req->assoclen);
+
+	nbytes = req->cryptlen;
+	if (!is_encrypt)
+		nbytes -= ctx->authsize;
+	rctx->fb_ahash_length = nbytes + req->assoclen;
+	rctx->fb_aes_src = src;
+	rctx->fb_aes_dst = dst;
+	rctx->fb_aes_cryptlen = nbytes;
+	rctx->ivsize = crypto_aead_ivsize(aead_tfm);
+	rctx->fb_aes_iv = kzalloc(rctx->ivsize, GFP_ATOMIC);
+	if (!rctx->fb_aes_iv)
+		goto ret;
+	memcpy(rctx->fb_aes_iv, req->iv, rctx->ivsize);
+	skcipher_request_set_crypt(aes_req, rctx->fb_aes_src,
+					rctx->fb_aes_dst,
+					rctx->fb_aes_cryptlen, rctx->fb_aes_iv);
+	if (is_encrypt)
+		ahash_request_set_crypt(ahash_req, req->dst,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+	else
+		ahash_request_set_crypt(ahash_req, req->src,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+
+	if (is_encrypt) {
+		skcipher_request_set_callback(aes_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage1_encrypt_complete, rctx);
+
+		rc = crypto_skcipher_encrypt(aes_req);
+		if (rc == 0) {
+			memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+			rc = _start_aead_aes_fb_stage2_hmac(rctx);
+			if (rc == 0) {
+				/* copy icv */
+				scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+			}
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+
+	} else {
+		ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_aead_aes_fb_stage1_ahash_complete, rctx);
+
+		rc = crypto_ahash_digest(ahash_req);
+		if (rc == 0) {
+			unsigned char tmp[ctx->authsize];
+
+			/* compare icv */
+			scatterwalk_map_and_copy(tmp,
+				src, req->cryptlen - ctx->authsize,
+				ctx->authsize, 0);
+			if (memcmp(rctx->fb_ahash_digest, tmp,
+							ctx->authsize) != 0)
+				rc = -EBADMSG;
+			else
+				rc = _start_aead_aes_fb_stage2_decrypt(rctx);
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+	}
+ret:
+	if (aes_req)
+		skcipher_request_free(aes_req);
+	if (ahash_req)
+		ahash_request_free(ahash_req);
+	kfree(rctx->fb_aes_iv);
+	return rc;
+}
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev,
+			 "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+#endif
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_enc++;
+	else
+		pstat->aead_sha256_aes_enc++;
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, true);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev,
+			 "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+#endif
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_dec++;
+	else
+		pstat->aead_sha256_aes_dec++;
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, false);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_enc++;
+	else
+		pstat->aead_sha256_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_dec++;
+	else
+		pstat->aead_sha256_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_enc++;
+	else
+		pstat->aead_sha256_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_dec++;
+	else
+		pstat->aead_sha256_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _sha_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	rctx->first_blk = 1;
+	rctx->last_blk = 0;
+	rctx->byte_count[0] = 0;
+	rctx->byte_count[1] = 0;
+	rctx->byte_count[2] = 0;
+	rctx->byte_count[3] = 0;
+	rctx->trailing_buf_len = 0;
+	rctx->count = 0;
+
+	return 0;
+}
+
+static int _sha1_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA1;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	pstat->sha1_digest++;
+	return 0;
+}
+
+static int _sha256_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA256;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	pstat->sha256_digest++;
+	return 0;
+}
+
+static int _sha1_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
+	memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int _sha1_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha1_export(req, out);
+}
+
+/* crypto hw padding constant for hmac first operation */
+#define HMAC_PADDING 64
+
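+/*
+ * Import a partially hashed state: the saved byte count splits into the
+ * full 64-byte blocks already consumed by the engine (plus the hardware
+ * key-block padding in the hmac case) and a residue that is replayed
+ * from the trailing buffer.
+ */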
+static int __sha1_import_common(struct ahash_request  *req, const void *in,
+				bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *in_ctx = (struct sha1_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+	if (in_ctx->count <= SHA1_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For hmac, the hardware pads the first block when
+		 * first_blk is set, so the byte count is incremented
+		 * by 64 after that first operation.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA1_BLOCK_SIZE-1));
+	return 0;
+}
+
+static int _sha1_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, false);
+}
+
+static int _sha1_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, true);
+}
+
+static int _sha256_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
+	memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
+
+	return 0;
+}
+
+static int _sha256_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha256_export(req, out);
+}
+
+static int __sha256_import_common(struct ahash_request  *req, const void *in,
+			bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *in_ctx = (struct sha256_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+	if (in_ctx->count <= SHA256_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For hmac, the hardware pads the first block when
+		 * first_blk is set, so the byte count is incremented
+		 * by 64 after that first operation.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA256_BLOCK_SIZE-1));
+
+	return 0;
+}
+
+static int _sha256_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, false);
+}
+
+static int _sha256_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, true);
+}
+
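+/*
+ * Engines with the aligned_only limitation cannot walk an arbitrary
+ * scatterlist; flatten the source into one contiguous buffer first.
+ */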
+static int _copy_source(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *srctx = NULL;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	srctx = ahash_request_ctx(req);
+	srctx->orig_src = req->src;
+	srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+	if (srctx->data == NULL) {
+		pr_err("Mem Alloc fail rctx->data, err %ld for 0x%x\n",
+				PTR_ERR(srctx->data), (req->nbytes + 64));
+		return -ENOMEM;
+	}
+
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+	bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data,
+						req->nbytes);
+	if (bytes != req->nbytes)
+		pr_warn("bytes copied=0x%x bytes to copy= 0x%x", bytes,
+							req->nbytes);
+	sg_set_buf(&srctx->dsg, srctx->data,
+				req->nbytes);
+	sg_mark_end(&srctx->dsg);
+	req->src = &srctx->dsg;
+
+	return 0;
+}
+
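+/*
+ * Hash update: the engine is only ever given whole blocks. Anything
+ * short of a block boundary is held back in the trailing buffer and
+ * prepended, through a chained sg entry, on the next update or final.
+ */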
+static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint32_t total, len, num_sg;
+	struct scatterlist *sg_last;
+	uint8_t *k_src = NULL;
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t nbytes;
+	uint32_t offset = 0;
+	uint32_t bytes = 0;
+	uint8_t  *staging;
+	int ret = 0;
+
+	/* check for trailing buffer from previous updates and append it */
+	total = req->nbytes + rctx->trailing_buf_len;
+	len = req->nbytes;
+
+	if (total <= sha_block_size) {
+		k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
+		num_sg = qcrypto_count_sg(req->src, len);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
+
+		rctx->trailing_buf_len = total;
+		return 0;
+	}
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	k_src = &rctx->trailing_buf[0];
+	/* get the new trailing buffer */
+	sha_pad_len = ALIGN(total, sha_block_size) - total;
+	trailing_buf_len =  sha_block_size - sha_pad_len;
+	offset = req->nbytes - trailing_buf_len;
+
+	if (offset != req->nbytes)
+		scatterwalk_map_and_copy(k_src, req->src, offset,
+						trailing_buf_len, 0);
+
+	nbytes = total - trailing_buf_len;
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+	len = rctx->trailing_buf_len;
+	sg_last = req->src;
+
+	while (len < nbytes) {
+		if ((len + sg_last->length) > nbytes)
+			break;
+		len += sg_last->length;
+		sg_last = sg_next(sg_last);
+	}
+	if (rctx->trailing_buf_len) {
+		if (cp->ce_support.aligned_only)  {
+			rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+			if (rctx->data2 == NULL) {
+				pr_err("Mem Alloc fail srctx->data2, err %ld\n",
+							PTR_ERR(rctx->data2));
+				return -ENOMEM;
+			}
+			memcpy(rctx->data2, staging,
+						rctx->trailing_buf_len);
+			memcpy((rctx->data2 + rctx->trailing_buf_len),
+					rctx->data, req->src->length);
+			kzfree(rctx->data);
+			rctx->data = rctx->data2;
+			sg_set_buf(&rctx->sg[0], rctx->data,
+					(rctx->trailing_buf_len +
+							req->src->length));
+			req->src = rctx->sg;
+			sg_mark_end(&rctx->sg[0]);
+		} else {
+			sg_mark_end(sg_last);
+			memset(rctx->sg, 0, sizeof(rctx->sg));
+			sg_set_buf(&rctx->sg[0], staging,
+						rctx->trailing_buf_len);
+			sg_mark_end(&rctx->sg[1]);
+			sg_chain(rctx->sg, 2, req->src);
+			req->src = rctx->sg;
+		}
+	} else
+		sg_mark_end(sg_last);
+
+	req->nbytes = nbytes;
+	rctx->trailing_buf_len = trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int ret = 0;
+	uint8_t  *staging;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->last_blk = 1;
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	req->src = &rctx->sg[0];
+	req->nbytes = rctx->trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+	rctx->first_blk = 1;
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+	_sha1_init(req);
+	return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+	_sha256_init(req);
+	return _sha_digest(req);
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err)
+{
+	struct completion *ahash_req_complete = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	complete(ahash_req_complete);
+}
+
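+/*
+ * Digest an HMAC key that is longer than the block size down to digest
+ * size on the engine itself, waiting synchronously for the result to
+ * land in sha_ctx->authkey.
+ */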
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	uint8_t	*in_buf;
+	int ret = 0;
+	struct scatterlist sg;
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+
+	ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (ahash_req == NULL)
+		return -ENOMEM;
+	init_completion(&ahash_req_complete);
+	ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&ahash_req_complete);
+	crypto_ahash_clear_flags(tfm, ~0);
+
+	in_buf = kzalloc(len + 64, GFP_KERNEL);
+	if (in_buf == NULL) {
+		ahash_request_free(ahash_req);
+		return -ENOMEM;
+	}
+	memcpy(in_buf, key, len);
+	sg_set_buf(&sg, in_buf, len);
+	sg_mark_end(&sg);
+
+	ahash_request_set_crypt(ahash_req, &sg,
+				&sha_ctx->authkey[0], len);
+
+	if (sha_ctx->alg == QCE_HASH_SHA1)
+		ret = _sha1_digest(ahash_req);
+	else
+		ret = _sha256_digest(ahash_req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+						&ahash_req_complete);
+		reinit_completion(&ahash_req_complete);
+	}
+
+	kzfree(in_buf);
+	ahash_request_free(ahash_req);
+
+	return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
+	if (len <= SHA1_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+		sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
+	}
+	return 0;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
+	if (len <= SHA256_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+		sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
+	}
+
+	return 0;
+}
+
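+/*
+ * Software HMAC, inner stage: seed the trailing buffer with K XOR ipad
+ * (0x36 repeated) so the padded key block is hashed before the message.
+ */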
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+						uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int i;
+
+	for (i = 0; i < sha_block_size; i++)
+		rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+	rctx->trailing_buf_len = sha_block_size;
+
+	return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+	return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+	return _sha256_update(req);
+}
+
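+/*
+ * Software HMAC, outer stage: hash (K XOR opad) || inner-digest with a
+ * freshly initialized state to produce the final MAC.
+ */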
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+		uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int i;
+	uint8_t  *staging;
+	uint8_t *p;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	p = staging;
+	for (i = 0; i < sha_block_size; i++)
+		*p++ = sha_ctx->authkey[i] ^ 0x5c;
+	memcpy(p, &rctx->digest[0], sha_digest_size);
+	sg_set_buf(&rctx->sg[0], staging, sha_block_size +
+							sha_digest_size);
+	sg_mark_end(&rctx->sg[0]);
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	req->src = &rctx->sg[0];
+	req->nbytes = sha_block_size + sha_digest_size;
+
+	_sha_init(req);
+	if (sha_ctx->alg == QCE_HASH_SHA1) {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+							SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	}
+
+	rctx->last_blk = 1;
+	return  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+}
+
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+			uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ahash_request *areq = sha_ctx->ahash_req;
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint8_t  *staging;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
+						rctx->trailing_buf_len);
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
+
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret =
+		wait_for_completion_interruptible(&sha_ctx->ahash_req_complete);
+		reinit_completion(&sha_ctx->ahash_req_complete);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA1_BLOCK_SIZE);
+	ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+	if (ret)
+		return ret;
+	return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA256_BLOCK_SIZE);
+
+	ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+	if (ret)
+		return ret;
+
+	return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+}
+
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+	return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+	return _sha_digest(req);
+}
+
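+/*
+ * Prefix an algorithm name with "qcom-", failing if the result would
+ * not fit within CRYPTO_MAX_ALG_NAME.
+ */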
+static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
+{
+	char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
+
+	if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-"))
+		return -EINVAL;
+	strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+	strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+	return 0;
+}
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device);
+
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
+			u32 hw_inst)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device_hw);
+
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_device);
+
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_device);
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		!cp->platform_support.hw_key_support) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_flag);
+
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		!cp->platform_support.hw_key_support) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_set_flag);
+
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		!cp->platform_support.hw_key_support) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_set_flag);
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+							unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_clear_flag);
+
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_aead_clear_flag);
+
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_ahash_clear_flag);
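+
+/*
+ * Note on the flag helpers above: QCRYPTO_CTX_USE_HW_KEY is rejected
+ * when the platform lacks HW key support, and the QCRYPTO_CTX_KEY_MASK
+ * test refuses a combination that would turn on every key-source flag
+ * at once.  A sketch of typical use (flag choice assumed):
+ *
+ *	qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_HW_KEY);
+ *	...
+ *	qcrypto_cipher_clear_flag(req, QCRYPTO_CTX_USE_HW_KEY);
+ *
+ * set_flag ORs into ctx->flags and clear_flag masks bits out, so flags
+ * accumulate across calls on the same tfm context.
+ */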
+
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+	{
+		.init		=	_sha1_init,
+		.update		=	_sha1_update,
+		.final		=	_sha1_final,
+		.export		=	_sha1_export,
+		.import		=	_sha1_import,
+		.digest		=	_sha1_digest,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "sha1",
+				.cra_driver_name = "qcrypto-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_init,
+		.update		=	_sha256_update,
+		.final		=	_sha256_final,
+		.export		=	_sha256_export,
+		.import		=	_sha256_import,
+		.digest		=	_sha256_digest,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "sha256",
+				.cra_driver_name = "qcrypto-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
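+
+/*
+ * The entries above are templates: at probe time each one is passed to
+ * _qcrypto_sha_alg_alloc(), and the registered copy may be renamed via
+ * _qcrypto_prefix_alg_cra_name() without modifying this static table.
+ */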
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+	{
+		.init		=	_sha1_hmac_init,
+		.update		=	_sha1_hmac_update,
+		.final		=	_sha1_hmac_final,
+		.export		=	_sha1_hmac_export,
+		.import		=	_sha1_hmac_import,
+		.digest		=	_sha1_hmac_digest,
+		.setkey		=	_sha1_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "hmac(sha1)",
+				.cra_driver_name = "qcrypto-hmac-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_hmac_init,
+		.update		=	_sha256_hmac_update,
+		.final		=	_sha256_hmac_final,
+		.export		=	_sha256_hmac_export,
+		.import		=	_sha256_hmac_import,
+		.digest		=	_sha256_hmac_digest,
+		.setkey		=	_sha256_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "hmac(sha256)",
+				.cra_driver_name = "qcrypto-hmac-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
+	{
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "qcrypto-ecb-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ecb,
+				.decrypt	= _qcrypto_dec_aes_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(aes)",
+		.cra_driver_name = "qcrypto-cbc-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_cbc,
+				.decrypt	= _qcrypto_dec_aes_cbc,
+			},
+		},
+	},
+	{
+		.cra_name	= "ctr(aes)",
+		.cra_driver_name = "qcrypto-ctr-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ctr,
+				.decrypt	= _qcrypto_dec_aes_ctr,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des)",
+		.cra_driver_name	= "qcrypto-ecb-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_ecb,
+				.decrypt	= _qcrypto_dec_des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des)",
+		.cra_driver_name = "qcrypto-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES_BLOCK_SIZE,
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_cbc,
+				.decrypt	= _qcrypto_dec_des_cbc,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des3_ede)",
+		.cra_driver_name	= "qcrypto-ecb-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_ecb,
+				.decrypt	= _qcrypto_dec_3des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des3_ede)",
+		.cra_driver_name = "qcrypto-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES3_EDE_BLOCK_SIZE,
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_cbc,
+				.decrypt	= _qcrypto_dec_3des_cbc,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
+	.cra_name	= "xts(aes)",
+	.cra_driver_name = "qcrypto-xts-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_ablkcipher_init,
+	.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+	.cra_u		= {
+		.ablkcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= _qcrypto_setkey_aes_xts,
+			.encrypt	= _qcrypto_enc_aes_xts,
+			.decrypt	= _qcrypto_dec_aes_xts,
+		},
+	},
+};
+
+static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = {
+	{
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
+			.cra_priority	= 300,
+			.cra_flags	= CRYPTO_ALG_ASYNC,
+			.cra_blocksize	= AES_BLOCK_SIZE,
+			.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	= 0,
+			.cra_module	= THIS_MODULE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_aes_cbc,
+		.init		= _qcrypto_cra_aead_aes_sha1_init,
+		.exit		= _qcrypto_cra_aead_aes_exit,
+	},
+	{
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+			.cra_priority	= 300,
+			.cra_flags	= CRYPTO_ALG_ASYNC,
+			.cra_blocksize	= DES_BLOCK_SIZE,
+			.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	= 0,
+			.cra_module	= THIS_MODULE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_des_cbc,
+		.init		= _qcrypto_cra_aead_sha1_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+	{
+		.base = {
+			.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+			.cra_priority	= 300,
+			.cra_flags	= CRYPTO_ALG_ASYNC,
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	= 0,
+			.cra_module	= THIS_MODULE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_3des_cbc,
+		.init		= _qcrypto_cra_aead_sha1_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = {
+	{
+		.base = {
+			.cra_name	= "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes",
+			.cra_priority	= 300,
+			.cra_flags	= CRYPTO_ALG_ASYNC,
+			.cra_blocksize	= AES_BLOCK_SIZE,
+			.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	= 0,
+			.cra_module	= THIS_MODULE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_aes_cbc,
+		.init		= _qcrypto_cra_aead_aes_sha256_init,
+		.exit		= _qcrypto_cra_aead_aes_exit,
+	},
+	{
+		.base = {
+			.cra_name	= "authenc(hmac(sha256),cbc(des))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des",
+			.cra_priority	= 300,
+			.cra_flags	= CRYPTO_ALG_ASYNC,
+			.cra_blocksize	= DES_BLOCK_SIZE,
+			.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	= 0,
+			.cra_module	= THIS_MODULE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_des_cbc,
+		.init		= _qcrypto_cra_aead_sha256_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+	{
+		.base = {
+			.cra_name	= "authenc(hmac(sha256),cbc(des3_ede))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des",
+			.cra_priority	= 300,
+			.cra_flags	= CRYPTO_ALG_ASYNC,
+			.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	= 0,
+			.cra_module	= THIS_MODULE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_3des_cbc,
+		.init		= _qcrypto_cra_aead_sha256_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_ccm_algo = {
+	.base = {
+		.cra_name	= "ccm(aes)",
+		.cra_driver_name = "qcrypto-aes-ccm",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_module	= THIS_MODULE,
+	},
+	.ivsize         = AES_BLOCK_SIZE,
+	.maxauthsize    = AES_BLOCK_SIZE,
+	.setkey = _qcrypto_aead_ccm_setkey,
+	.setauthsize = _qcrypto_aead_ccm_setauthsize,
+	.encrypt = _qcrypto_aead_encrypt_aes_ccm,
+	.decrypt = _qcrypto_aead_decrypt_aes_ccm,
+	.init	= _qcrypto_cra_aead_ccm_init,
+	.exit	= _qcrypto_cra_aead_exit,
+};
+
+static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = {
+	.base = {
+		.cra_name	= "rfc4309(ccm(aes))",
+		.cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = 1,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_module	= THIS_MODULE,
+	},
+	.ivsize         = 8,
+	.maxauthsize    = 16,
+	.setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+	.setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+	.encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+	.decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+	.init	= _qcrypto_cra_aead_rfc4309_ccm_init,
+	.exit	= _qcrypto_cra_aead_exit,
+};
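+
+/*
+ * rfc4309(ccm(aes)) derives its 11-byte CCM nonce from a 3-byte salt
+ * carried in the key material plus the 8-byte per-request IV (hence
+ * .ivsize = 8 above), which is why this template needs its own
+ * setkey/setauthsize handlers rather than the plain CCM ones.
+ */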
+
+static int  _qcrypto_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	void *handle;
+	struct crypto_priv *cp = &qcrypto_dev;
+	int i;
+	struct msm_ce_hw_support *platform_support;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	struct qcrypto_req_control *pqcrypto_req_control = NULL;
+
+	pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
+	if (!pengine)
+		return -ENOMEM;
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		kzfree(pengine);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, pengine);
+	pengine->qce = handle;
+	pengine->pcp = cp;
+	pengine->pdev = pdev;
+	pengine->signature = 0xdeadbeef;
+
+	init_timer(&(pengine->bw_reaper_timer));
+	INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
+	pengine->bw_reaper_timer.function =
+			qcrypto_bw_reaper_timer_callback;
+	INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
+	pengine->high_bw_req = false;
+	pengine->active_seq = 0;
+	pengine->last_active_seq = 0;
+	pengine->check_flag = false;
+	pengine->max_req_used = 0;
+	pengine->issue_req = false;
+
+	crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+
+	mutex_lock(&cp->engine_lock);
+	cp->total_units++;
+	pengine->unit = cp->total_units;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->first_engine = list_empty(&cp->engine_list);
+	if (pengine->first_engine)
+		cp->first_engine = pengine;
+	list_add_tail(&pengine->elist, &cp->engine_list);
+	cp->next_engine = pengine;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qce_hw_support(pengine->qce, &cp->ce_support);
+	pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
+	pengine->max_req = cp->ce_support.max_request;
+	pqcrypto_req_control = kcalloc(pengine->max_req,
+			sizeof(struct qcrypto_req_control), GFP_KERNEL);
+	if (pqcrypto_req_control == NULL) {
+		rc = -ENOMEM;
+		goto err;
+	}
+	qcrypto_init_req_control(pengine, pqcrypto_req_control);
+	if (cp->ce_support.bam)	 {
+		cp->platform_support.ce_shared = cp->ce_support.is_shared;
+		cp->platform_support.shared_ce_resource = 0;
+		cp->platform_support.hw_key_support = cp->ce_support.hw_key;
+		cp->platform_support.sha_hmac = 1;
+
+		cp->platform_support.bus_scale_table =
+			(struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+		if (!cp->platform_support.bus_scale_table)
+			pr_warn("bus_scale_table is NULL\n");
+
+		pengine->ce_device = cp->ce_support.ce_device;
+
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		cp->platform_support.ce_shared = platform_support->ce_shared;
+		cp->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		cp->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		cp->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		cp->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+
+	pengine->bus_scale_handle = 0;
+
+	if (cp->platform_support.bus_scale_table != NULL) {
+		pengine->bus_scale_handle =
+			msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+					cp->platform_support.bus_scale_table);
+		if (!pengine->bus_scale_handle) {
+			pr_err("%s not able to get bus scale\n",
+				__func__);
+			rc =  -ENOMEM;
+			goto err;
+		}
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+	} else {
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
+	}
+
+	if (cp->total_units != 1) {
+		mutex_unlock(&cp->engine_lock);
+		return 0;
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_algos[i]);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.cra_name,
+					strlen(q_alg->cipher_alg.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	if (cp->ce_support.aes_xts) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_xts_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_xts_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.cra_name,
+					strlen(q_alg->cipher_alg.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/*
+	 * Register crypto hash (sha1 and sha256) algorithms the
+	 * device supports
+	 */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+		struct qcrypto_alg *q_alg = NULL;
+
+		q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_ahash_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+				q_alg->sha_alg.halg.base.cra_name,
+				strlen(q_alg->sha_alg.halg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->sha_alg.halg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_ahash(&q_alg->sha_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto aead (hmac-sha1) algorithms the device supports */
+	if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
+		|| cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha1_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	/* register crypto aead (hmac-sha256) algorithms the device supports */
+	if (cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha256_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+		/* register crypto hmac algorithms the device supports */
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+			struct qcrypto_alg *q_alg = NULL;
+
+			q_alg = _qcrypto_sha_alg_alloc(cp,
+						&_qcrypto_sha_hmac_algos[i]);
+
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_hmac_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->sha_alg.halg.base.cra_name,
+					strlen(
+					q_alg->sha_alg.halg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+					     "The algorithm name %s is too long.\n",
+					     q_alg->sha_alg.halg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_ahash(&q_alg->sha_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+				"%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+				kzfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			}
+		}
+	}
+	/*
+	 * Register crypto cipher (aes-ccm) algorithms the
+	 * device supports
+	 */
+	if (cp->ce_support.aes_ccm) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+
+		q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_rfc4309_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+	}
+	mutex_unlock(&cp->engine_lock);
+
+	return 0;
+err:
+	_qcrypto_remove_engine(pengine);
+	mutex_unlock(&cp->engine_lock);
+	if (pengine->qce)
+		qce_close(pengine->qce);
+	kzfree(pengine);
+	return rc;
+}
+
+static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp = pengine->pcp;
+
+	if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
+					|| cp->req_queue.qlen)
+		return 1;
+	return 0;
+}
+
+static void _qcrypto_do_suspending(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp = pengine->pcp;
+
+	if (cp->platform_support.bus_scale_table == NULL)
+		return;
+	del_timer_sync(&pengine->bw_reaper_timer);
+	qcrypto_ce_set_bus(pengine, false);
+}
+
+static int  _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+
+	pengine = platform_get_drvdata(pdev);
+	if (!pengine)
+		return -EINVAL;
+
+	/*
+	 * Check if this platform supports clock management in suspend/resume
+	 * If not, just simply return 0.
+	 */
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	switch (pengine->bw_state) {
+	case BUS_NO_BANDWIDTH:
+		if (pengine->high_bw_req == false)
+			pengine->bw_state = BUS_SUSPENDED;
+		else
+			ret = -EBUSY;
+		break;
+	case BUS_HAS_BANDWIDTH:
+		if (_qcrypto_engine_in_use(pengine)) {
+			ret = -EBUSY;
+		} else {
+			pengine->bw_state = BUS_SUSPENDING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+			_qcrypto_do_suspending(pengine);
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_SUSPENDED;
+		}
+		break;
+	case BUS_BANDWIDTH_RELEASING:
+	case BUS_BANDWIDTH_ALLOCATING:
+	case BUS_SUSPENDED:
+	case BUS_SUSPENDING:
+	default:
+		ret = -EBUSY;
+		break;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (ret)
+		return ret;
+	if (qce_pm_table.suspend)
+		qce_pm_table.suspend(pengine->qce);
+	return 0;
+}
+
+static int  _qcrypto_resume(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+	int ret = 0;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return -EINVAL;
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->bw_state == BUS_SUSPENDED) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		if (qce_pm_table.resume)
+			qce_pm_table.resume(pengine->qce);
+
+		spin_lock_irqsave(&cp->lock, flags);
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+		pengine->active_seq++;
+		pengine->check_flag = false;
+		if (cp->req_queue.qlen || pengine->req_queue.qlen) {
+			if (pengine->high_bw_req == false) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+		}
+	} else
+		ret = -EBUSY;
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return ret;
+}
+
+static const struct of_device_id qcrypto_match[] = {
+	{	.compatible = "qcom,qcrypto",
+	},
+	{}
+};
+
+static struct platform_driver __qcrypto = {
+	.probe          = _qcrypto_probe,
+	.remove         = _qcrypto_remove,
+	.suspend        = _qcrypto_suspend,
+	.resume         = _qcrypto_resume,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcrypto",
+		.of_match_table = qcrypto_match,
+	},
+};
+
+static int _debug_qcrypto;
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcrypto = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcrypto);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		pe->total_req = 0;
+		pe->err_req = 0;
+		qce_clear_driver_stats(pe->qce);
+		pe->max_req_used = 0;
+	}
+	cp->max_qlen = 0;
+	cp->resp_start = 0;
+	cp->resp_stop = 0;
+	cp->no_avail = 0;
+	cp->max_resp_qlen = 0;
+	cp->queue_work_eng3 = 0;
+	cp->queue_work_not_eng3 = 0;
+	cp->queue_work_not_eng3_nz = 0;
+	cp->max_reorder_cnt = 0;
+	for (i = 0; i < MAX_SMP_CPU + 1; i++)
+		cp->cpu_req[i] = 0;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
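+
+/*
+ * Debugfs usage (paths assume the default debugfs mount point): reading
+ * /sys/kernel/debug/qcrypto/stats-1 dumps the counters collected by
+ * _disp_stats(); writing anything to the same file clears the per-engine
+ * and global statistics, e.g.:
+ *
+ *	cat /sys/kernel/debug/qcrypto/stats-1
+ *	echo 1 > /sys/kernel/debug/qcrypto/stats-1
+ */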
+
+static int _qcrypto_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcrypto", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcrypto = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcrypto, &_debug_stats_ops);
+	if (dent == NULL) {
+		pr_err("qcrypto debugfs_create_file fail\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+	int rc;
+	struct crypto_priv *pcp = &qcrypto_dev;
+
+	rc = _qcrypto_debug_init();
+	if (rc)
+		return rc;
+	INIT_LIST_HEAD(&pcp->alg_list);
+	INIT_LIST_HEAD(&pcp->engine_list);
+	init_llist_head(&pcp->ordered_resp_list);
+	spin_lock_init(&pcp->lock);
+	mutex_init(&pcp->engine_lock);
+	pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq",
+			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+	if (!pcp->resp_wq) {
+		pr_err("Error allocating workqueue\n");
+		return -ENOMEM;
+	}
+	INIT_WORK(&pcp->resp_work, seq_response);
+	pcp->total_units = 0;
+	pcp->platform_support.bus_scale_table = NULL;
+	pcp->next_engine = NULL;
+	pcp->scheduled_eng = NULL;
+	pcp->ce_req_proc_sts = IN_PROGRESS;
+	crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+	return platform_driver_register(&__qcrypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+	pr_debug("%s Unregister QCRYPTO\n", __func__);
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&__qcrypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Crypto driver");
diff --git a/drivers/crypto/msm/qcryptohw_50.h b/drivers/crypto/msm/qcryptohw_50.h
new file mode 100644
index 0000000..574f579
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_50.h
@@ -0,0 +1,528 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define CRYPTO_BAM_CNFG_BITS_REG		0x0007C
+#define CRYPTO_BAM_CD_ENABLE			27
+#define CRYPTO_BAM_CD_ENABLE_MASK		(1 << CRYPTO_BAM_CD_ENABLE)
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x1A000
+
+#define CRYPTO_DATA_IN0_REG			0x1A010
+#define CRYPTO_DATA_IN1_REG			0x1A014
+#define CRYPTO_DATA_IN2_REG			0x1A018
+#define CRYPTO_DATA_IN3_REG			0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG			0x1A020
+#define CRYPTO_DATA_OUT1_REG			0x1A024
+#define CRYPTO_DATA_OUT2_REG			0x1A028
+#define CRYPTO_DATA_OUT3_REG			0x1A02C
+
+#define CRYPTO_STATUS_REG			0x1A100
+#define CRYPTO_STATUS2_REG			0x1A104
+#define CRYPTO_ENGINES_AVAIL			0x1A108
+#define CRYPTO_FIFO_SIZES_REG			0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG			0x1A110
+#define CRYPTO_GOPROC_REG			0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG		0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG		0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
+#define CRYPTO_ENCR_SEG_START_REG		0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG			0x1D000
+#define CRYPTO_ENCR_KEY1_REG			0x1D004
+#define CRYPTO_ENCR_KEY2_REG			0x1D008
+#define CRYPTO_ENCR_KEY3_REG			0x1D00C
+#define CRYPTO_ENCR_KEY4_REG			0x1D010
+#define CRYPTO_ENCR_KEY5_REG			0x1D014
+#define CRYPTO_ENCR_KEY6_REG			0x1D018
+#define CRYPTO_ENCR_KEY7_REG			0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x1D03C
+
+#define CRYPTO_ENCR_PIPE0_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIPE0_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIPE0_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIPE0_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIPE0_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIPE0_KEY5_REG		0x1E014
+#define CRYPTO_ENCR_PIPE0_KEY6_REG		0x1E018
+#define CRYPTO_ENCR_PIPE0_KEY7_REG		0x1E01C
+
+#define CRYPTO_ENCR_PIPE1_KEY0_REG		0x1E020
+#define CRYPTO_ENCR_PIPE1_KEY1_REG		0x1E024
+#define CRYPTO_ENCR_PIPE1_KEY2_REG		0x1E028
+#define CRYPTO_ENCR_PIPE1_KEY3_REG		0x1E02C
+#define CRYPTO_ENCR_PIPE1_KEY4_REG		0x1E030
+#define CRYPTO_ENCR_PIPE1_KEY5_REG		0x1E034
+#define CRYPTO_ENCR_PIPE1_KEY6_REG		0x1E038
+#define CRYPTO_ENCR_PIPE1_KEY7_REG		0x1E03C
+
+#define CRYPTO_ENCR_PIPE2_KEY0_REG		0x1E040
+#define CRYPTO_ENCR_PIPE2_KEY1_REG		0x1E044
+#define CRYPTO_ENCR_PIPE2_KEY2_REG		0x1E048
+#define CRYPTO_ENCR_PIPE2_KEY3_REG		0x1E04C
+#define CRYPTO_ENCR_PIPE2_KEY4_REG		0x1E050
+#define CRYPTO_ENCR_PIPE2_KEY5_REG		0x1E054
+#define CRYPTO_ENCR_PIPE2_KEY6_REG		0x1E058
+#define CRYPTO_ENCR_PIPE2_KEY7_REG		0x1E05C
+
+#define CRYPTO_ENCR_PIPE3_KEY0_REG		0x1E060
+#define CRYPTO_ENCR_PIPE3_KEY1_REG		0x1E064
+#define CRYPTO_ENCR_PIPE3_KEY2_REG		0x1E068
+#define CRYPTO_ENCR_PIPE3_KEY3_REG		0x1E06C
+#define CRYPTO_ENCR_PIPE3_KEY4_REG		0x1E070
+#define CRYPTO_ENCR_PIPE3_KEY5_REG		0x1E074
+#define CRYPTO_ENCR_PIPE3_KEY6_REG		0x1E078
+#define CRYPTO_ENCR_PIPE3_KEY7_REG		0x1E07C
+
+
+#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG		0x1E200
+#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG		0x1E204
+#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG		0x1E208
+#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG		0x1E20C
+#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG		0x1E210
+#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG		0x1E214
+#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG		0x1E218
+#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG		0x1E21C
+
+#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG		0x1E220
+#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG		0x1E224
+#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG		0x1E228
+#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG		0x1E22C
+#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG		0x1E230
+#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG		0x1E234
+#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG		0x1E238
+#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG		0x1E23C
+
+#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG		0x1E240
+#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG		0x1E244
+#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG		0x1E248
+#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG		0x1E24C
+#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG		0x1E250
+#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG		0x1E254
+#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG		0x1E258
+#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG		0x1E25C
+
+#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG		0x1E260
+#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG		0x1E264
+#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG		0x1E268
+#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG		0x1E26C
+#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG		0x1E270
+#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG		0x1E274
+#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG		0x1E278
+#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG		0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG			0x1A20C
+#define CRYPTO_CNTR1_IV1_REG			0x1A210
+#define CRYPTO_CNTR2_IV2_REG			0x1A214
+#define CRYPTO_CNTR3_IV3_REG			0x1A218
+
+#define CRYPTO_CNTR_MASK_REG0			0x1A23C
+#define CRYPTO_CNTR_MASK_REG1			0x1A238
+#define CRYPTO_CNTR_MASK_REG2			0x1A234
+#define CRYPTO_CNTR_MASK_REG			0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG		0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG		0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG		0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG		0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x1A304
+#define CRYPTO_AUTH_SEG_START_REG		0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG			0x1D040
+#define CRYPTO_AUTH_KEY1_REG			0x1D044
+#define CRYPTO_AUTH_KEY2_REG			0x1D048
+#define CRYPTO_AUTH_KEY3_REG			0x1D04C
+#define CRYPTO_AUTH_KEY4_REG			0x1D050
+#define CRYPTO_AUTH_KEY5_REG			0x1D054
+#define CRYPTO_AUTH_KEY6_REG			0x1D058
+#define CRYPTO_AUTH_KEY7_REG			0x1D05C
+#define CRYPTO_AUTH_KEY8_REG			0x1D060
+#define CRYPTO_AUTH_KEY9_REG			0x1D064
+#define CRYPTO_AUTH_KEY10_REG			0x1D068
+#define CRYPTO_AUTH_KEY11_REG			0x1D06C
+#define CRYPTO_AUTH_KEY12_REG			0x1D070
+#define CRYPTO_AUTH_KEY13_REG			0x1D074
+#define CRYPTO_AUTH_KEY14_REG			0x1D078
+#define CRYPTO_AUTH_KEY15_REG			0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG		0x1E880
+#define CRYPTO_AUTH_PIPE1_KEY1_REG		0x1E884
+#define CRYPTO_AUTH_PIPE1_KEY2_REG		0x1E888
+#define CRYPTO_AUTH_PIPE1_KEY3_REG		0x1E88C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG		0x1E890
+#define CRYPTO_AUTH_PIPE1_KEY5_REG		0x1E894
+#define CRYPTO_AUTH_PIPE1_KEY6_REG		0x1E898
+#define CRYPTO_AUTH_PIPE1_KEY7_REG		0x1E89C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG		0x1E8A0
+#define CRYPTO_AUTH_PIPE1_KEY9_REG		0x1E8A4
+#define CRYPTO_AUTH_PIPE1_KEY10_REG		0x1E8A8
+#define CRYPTO_AUTH_PIPE1_KEY11_REG		0x1E8AC
+#define CRYPTO_AUTH_PIPE1_KEY12_REG		0x1E8B0
+#define CRYPTO_AUTH_PIPE1_KEY13_REG		0x1E8B4
+#define CRYPTO_AUTH_PIPE1_KEY14_REG		0x1E8B8
+#define CRYPTO_AUTH_PIPE1_KEY15_REG		0x1E8BC
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG		0x1E900
+#define CRYPTO_AUTH_PIPE2_KEY1_REG		0x1E904
+#define CRYPTO_AUTH_PIPE2_KEY2_REG		0x1E908
+#define CRYPTO_AUTH_PIPE2_KEY3_REG		0x1E90C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG		0x1E910
+#define CRYPTO_AUTH_PIPE2_KEY5_REG		0x1E914
+#define CRYPTO_AUTH_PIPE2_KEY6_REG		0x1E918
+#define CRYPTO_AUTH_PIPE2_KEY7_REG		0x1E91C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG		0x1E920
+#define CRYPTO_AUTH_PIPE2_KEY9_REG		0x1E924
+#define CRYPTO_AUTH_PIPE2_KEY10_REG		0x1E928
+#define CRYPTO_AUTH_PIPE2_KEY11_REG		0x1E92C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG		0x1E930
+#define CRYPTO_AUTH_PIPE2_KEY13_REG		0x1E934
+#define CRYPTO_AUTH_PIPE2_KEY14_REG		0x1E938
+#define CRYPTO_AUTH_PIPE2_KEY15_REG		0x1E93C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG		0x1E980
+#define CRYPTO_AUTH_PIPE3_KEY1_REG		0x1E984
+#define CRYPTO_AUTH_PIPE3_KEY2_REG		0x1E988
+#define CRYPTO_AUTH_PIPE3_KEY3_REG		0x1E98C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG		0x1E990
+#define CRYPTO_AUTH_PIPE3_KEY5_REG		0x1E994
+#define CRYPTO_AUTH_PIPE3_KEY6_REG		0x1E998
+#define CRYPTO_AUTH_PIPE3_KEY7_REG		0x1E99C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG		0x1E9A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG		0x1E9A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG		0x1E9A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG		0x1E9AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG		0x1E9B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG		0x1E9B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG		0x1E9B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG		0x1E9BC
+
+
+#define CRYPTO_AUTH_IV0_REG			0x1A310
+#define CRYPTO_AUTH_IV1_REG			0x1A314
+#define CRYPTO_AUTH_IV2_REG			0x1A318
+#define CRYPTO_AUTH_IV3_REG			0x1A31C
+#define CRYPTO_AUTH_IV4_REG			0x1A320
+#define CRYPTO_AUTH_IV5_REG			0x1A324
+#define CRYPTO_AUTH_IV6_REG			0x1A328
+#define CRYPTO_AUTH_IV7_REG			0x1A32C
+#define CRYPTO_AUTH_IV8_REG			0x1A330
+#define CRYPTO_AUTH_IV9_REG			0x1A334
+#define CRYPTO_AUTH_IV10_REG			0x1A338
+#define CRYPTO_AUTH_IV11_REG			0x1A33C
+#define CRYPTO_AUTH_IV12_REG			0x1A340
+#define CRYPTO_AUTH_IV13_REG			0x1A344
+#define CRYPTO_AUTH_IV14_REG			0x1A348
+#define CRYPTO_AUTH_IV15_REG			0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG		0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG		0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG		0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x1A3BC
+
+#define CRYPTO_CONFIG_REG			0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG			0x1AF00
+#define CRYPTO_DEBUG_REG			0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK		0xFFFF
+#define CRYPTO_CORE_STEP_REV			0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		0xFF000000
+#define CRYPTO_CORE_MAJOR_REV			24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK		0xFF0000
+#define CRYPTO_CORE_MINOR_REV			16 /* bit 23-16 */
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			31
+#define CRYPTO_DOUT_SIZE_AVAIL			26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_HSD_ERR				20
+#define CRYPTO_ACCESS_VIOL			19
+#define CRYPTO_PIPE_ACTIVE_ERR			18
+#define CRYPTO_CFG_CHNG_ERR			17
+#define CRYPTO_DOUT_ERR				16
+#define CRYPTO_DIN_ERR				15
+#define CRYPTO_AXI_ERR				14
+#define CRYPTO_CRYPTO_STATE			10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* status2 reg  */
+#define CRYPTO_AXI_EXTRA			1
+#define CRYPTO_LOCKED				2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK			(0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT	0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT	1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT	2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT	3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT	4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT	5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT	6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT	7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT	8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT	9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT	10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT	11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT	12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT	13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT	14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT	15
+
+#define CRYPTO_MAX_QUEUED_REQ			14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS	0
+#define CRYPTO_ENUM_2_QUEUED_REQS	1
+#define CRYPTO_ENUM_3_QUEUED_REQS	2
+
+#define CRYPTO_IRQ_ENABLES			10	/* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE		9
+#define CRYPTO_LITTLE_ENDIAN_MASK		(1 << CRYPTO_LITTLE_ENDIAN_MODE)
+#define CRYPTO_PIPE_SET_SELECT			5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK		(0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N			4
+
+#define CRYPTO_MASK_DOUT_INTR			3
+#define CRYPTO_MASK_DIN_INTR			2
+#define CRYPTO_MASK_OP_DONE_INTR		1
+#define CRYPTO_MASK_ERR_INTR			0
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			24
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			23
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+				(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH		19
+#define CRYPTO_USE_HW_KEY_AUTH			18
+#define CRYPTO_FIRST				17
+#define CRYPTO_LAST				16
+
+#define CRYPTO_AUTH_POS				14 /* bit 15 .. 14 */
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 13 .. 9 */
+#define CRYPTO_AUTH_SIZE_MASK			(0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1		0
+#define CRYPTO_AUTH_SIZE_SHA256		1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES	0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES	1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES	2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES	3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES	4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES	5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES	6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES	7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES	8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES	9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES	10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES	11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES	12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES	13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES	14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES	15
+
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6 */
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH	0
+#define CRYPTO_AUTH_MODE_HMAC	1
+#define CRYPTO_AUTH_MODE_CCM	0
+#define CRYPTO_AUTH_MODE_CMAC	1
+
+#define CRYPTO_AUTH_KEY_SIZE			3 /* bit 5 .. 3 */
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128	0
+#define CRYPTO_AUTH_KEY_SZ_AES256	2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0 */
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE	0
+#define CRYPTO_AUTH_ALG_SHA	1
+#define CRYPTO_AUTH_ALG_AES	2
+#define CRYPTO_AUTH_ALG_KASUMI	3
+#define CRYPTO_AUTH_ALG_SNOW3G	4
+#define CRYPTO_AUTH_ALG_ZUC	5
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		17 /* bit */
+#define CRYPTO_F8_KEYSTREAM_DISABLED	0
+#define CRYPTO_F8_KEYSTREAM_ENABLED	1
+
+#define CRYPTO_F8_DIRECTION			16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK	0
+#define CRYPTO_F8_DIRECTION_DOWNLINK	1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR		15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED	1
+#define CRYPTO_USE_KEY_REGISTERS		0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR			14
+#define CRYPTO_USE_KEY_REG	0
+#define CRYPTO_USE_HW_KEY	1
+
+#define CRYPTO_LAST_CCM				13
+#define CRYPTO_LAST_CCM_XFR	1
+#define CRYPTO_INTERM_CCM_XFR	0
+
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST	0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB	0
+#define CRYPTO_ENCR_MODE_CBC	1
+#define CRYPTO_ENCR_MODE_CTR	2
+#define CRYPTO_ENCR_MODE_XTS	3
+#define CRYPTO_ENCR_MODE_CCM	4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES		0
+#define CRYPTO_ENCR_KEY_SZ_3DES		1
+#define CRYPTO_ENCR_KEY_SZ_AES128	0
+#define CRYPTO_ENCR_KEY_SZ_AES256	2
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE		0
+#define CRYPTO_ENCR_ALG_DES		1
+#define CRYPTO_ENCR_ALG_AES		2
+#define CRYPTO_ENCR_ALG_KASUMI		4
+#define CRYPTO_ENCR_ALG_SNOW_3G		5
+#define CRYPTO_ENCR_ALG_ZUC		6
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+#define CRYPTO_RESULTS_DUMP			2
+
+/*  F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG  */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT		16	/* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+		(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER		0	/* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+		(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4 REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS	0	/* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+		(0x7  << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				1
+#define CRYPTO_ENCR_SNOW3G_SEL			2
+#define CRYPTO_ENCR_KASUMI_SEL			3
+#define CRYPTO_SHA_SEL				4
+#define CRYPTO_SHA512_SEL			5
+#define CRYPTO_AUTH_AES_SEL			6
+#define CRYPTO_AUTH_SNOW3G_SEL			7
+#define CRYPTO_AUTH_KASUMI_SEL			8
+#define CRYPTO_BAM_PIPE_SETS			9	/* bit 12 - 9 */
+#define CRYPTO_AXI_WR_BEATS			13	/* bit 18 - 13 */
+#define CRYPTO_AXI_RD_BEATS			19	/* bit 24 - 19 */
+#define CRYPTO_ENCR_ZUC_SEL			26
+#define CRYPTO_AUTH_ZUC_SEL			27
+#define CRYPTO_ZUC_ENABLE			28
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
diff --git a/drivers/devfreq/Kconfig b/drivers/devfreq/Kconfig
index 6476c5e..b8effac 100644
--- a/drivers/devfreq/Kconfig
+++ b/drivers/devfreq/Kconfig
@@ -156,6 +156,15 @@
 	   governor is unlikely to be useful for other
 	   devices.
 
+config DEVFREQ_GOV_QCOM_GPUBW_MON
+	tristate "GPU BW voting governor"
+	depends on DEVFREQ_GOV_QCOM_ADRENO_TZ
+	help
+	  This governor works together with the Adreno Trustzone governor
+	  and selects bus frequency votes using an "on-demand" algorithm.
+	  It is unlikely to be useful for non-Adreno devices.
+
 config ARM_EXYNOS_BUS_DEVFREQ
 	tristate "ARM EXYNOS Generic Memory Bus DEVFREQ Driver"
 	depends on ARCH_EXYNOS || COMPILE_TEST
diff --git a/drivers/devfreq/Makefile b/drivers/devfreq/Makefile
index f488f12..f248e02 100644
--- a/drivers/devfreq/Makefile
+++ b/drivers/devfreq/Makefile
@@ -8,6 +8,7 @@
 obj-$(CONFIG_DEVFREQ_GOV_CPUFREQ)	+= governor_cpufreq.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_ADRENO_TZ) += governor_msm_adreno_tz.o
 obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_bw_vbif.o
+obj-$(CONFIG_DEVFREQ_GOV_QCOM_GPUBW_MON) += governor_gpubw_mon.o
 obj-$(CONFIG_QCOM_BIMC_BWMON)		+= bimc-bwmon.o
 obj-$(CONFIG_ARM_MEMLAT_MON)		+= arm-memlat-mon.o
 obj-$(CONFIG_QCOMCCI_HWMON)		+= msmcci-hwmon.o
diff --git a/drivers/devfreq/devfreq_trace.h b/drivers/devfreq/devfreq_trace.h
new file mode 100644
index 0000000..7dacc0e
--- /dev/null
+++ b/drivers/devfreq/devfreq_trace.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_DEVFREQ_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _DEVFREQ_TRACE_H
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM devfreq
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE devfreq_trace
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(devfreq_msg,
+	TP_PROTO(const char *msg),
+	TP_ARGS(msg),
+	TP_STRUCT__entry(
+		__string(msg, msg)
+	),
+	TP_fast_assign(
+		__assign_str(msg, msg);
+	),
+	TP_printk(
+		"%s", __get_str(msg)
+	)
+);
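+
+/*
+ * TRACE_EVENT() above generates a trace_devfreq_msg(const char *msg)
+ * helper.  A governor can drop free-form markers into the devfreq
+ * trace stream with, for example (message text is illustrative):
+ *
+ *	trace_devfreq_msg("bus vote updated");
+ */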
+
+#endif /* _DEVFREQ_TRACE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
new file mode 100644
index 0000000..9c24eef
--- /dev/null
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -0,0 +1,266 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/devfreq.h>
+#include <linux/module.h>
+#include <linux/msm_adreno_devfreq.h>
+#include <linux/slab.h>
+
+#include "devfreq_trace.h"
+#include "governor.h"
+
+#define MIN_BUSY                1000
+#define LONG_FLOOR              50000
+#define HIST                    5
+#define TARGET                  80
+#define CAP                     75
+/* AB vote is in multiple of BW_STEP Mega bytes */
+#define BW_STEP                 160
+
+static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
+					unsigned int norm_max)
+{
+	int i;
+
+	priv->bus.max = norm_max;
+	for (i = 0; i < priv->bus.num; i++) {
+		priv->bus.up[i] = priv->bus.p_up[i] * norm_max / 100;
+		priv->bus.down[i] = priv->bus.p_down[i] * norm_max / 100;
+	}
+}
+
+static inline int devfreq_get_freq_level(struct devfreq *devfreq,
+	unsigned long freq)
+{
+	int lev;
+
+	for (lev = 0; lev < devfreq->profile->max_state; lev++)
+		if (freq == devfreq->profile->freq_table[lev])
+			return lev;
+
+	return -EINVAL;
+}
+
+static int devfreq_gpubw_get_target(struct devfreq *df,
+				unsigned long *freq)
+{
+	struct devfreq_msm_adreno_tz_data *priv = df->data;
+	struct msm_busmon_extended_profile *bus_profile = container_of(
+					(df->profile),
+					struct msm_busmon_extended_profile,
+					profile);
+	struct devfreq_dev_status stats;
+	struct xstats b;
+	int result;
+	int level = 0;
+	int act_level;
+	int norm_cycles;
+	int gpu_percent;
+	/*
+	 * At max usage, normalized AB should be the gpu_bimc frequency in MHz.
+	 * Start with a reasonable value and let the system push it up to max.
+	 */
+	static int norm_ab_max = 300;
+	int norm_ab;
+	unsigned long ab_mbytes = 0;
+
+	if (priv == NULL)
+		return 0;
+
+	stats.private_data = &b;
+
+	result = df->profile->get_dev_status(df->dev.parent, &stats);
+	if (result)
+		return result;
+
+	*freq = stats.current_frequency;
+
+	priv->bus.total_time += stats.total_time;
+	priv->bus.gpu_time += stats.busy_time;
+	priv->bus.ram_time += b.ram_time;
+	priv->bus.ram_wait += b.ram_wait;
+
+	level = devfreq_get_freq_level(df, stats.current_frequency);
+
+	if (priv->bus.total_time < LONG_FLOOR)
+		return result;
+
+	norm_cycles = (unsigned int)(priv->bus.ram_time + priv->bus.ram_wait) /
+			(unsigned int) priv->bus.total_time;
+	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
+			(unsigned int) priv->bus.total_time;
+
+	/*
+	 * If there's a new high watermark, update the cutoffs and send the
+	 * FAST hint.  Otherwise check the current value against the current
+	 * cutoffs.
+	 */
+	if (norm_cycles > priv->bus.max) {
+		_update_cutoff(priv, norm_cycles);
+		bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+	} else {
+		/* GPU votes for IB, not AB, so don't under-vote the system */
+		norm_cycles = (100 * norm_cycles) / TARGET;
+		act_level = priv->bus.index[level] + b.mod;
+		act_level = (act_level < 0) ? 0 : act_level;
+		act_level = (act_level >= priv->bus.num) ?
+				(priv->bus.num - 1) : act_level;
+		if (norm_cycles > priv->bus.up[act_level] &&
+				gpu_percent > CAP)
+			bus_profile->flag = DEVFREQ_FLAG_FAST_HINT;
+		else if (norm_cycles < priv->bus.down[act_level] && level)
+			bus_profile->flag = DEVFREQ_FLAG_SLOW_HINT;
+	}
+
+	/* Calculate the AB vote based on bus width if defined */
+	if (priv->bus.width) {
+		norm_ab = (unsigned int)priv->bus.ram_time /
+			(unsigned int) priv->bus.total_time;
+		/* Calculate AB in Mega Bytes and roundup in BW_STEP */
+		ab_mbytes = (norm_ab * priv->bus.width * 1000000ULL) >> 20;
+		bus_profile->ab_mbytes = roundup(ab_mbytes, BW_STEP);
+	} else if (bus_profile->flag) {
+		/* Re-calculate the AB percentage for a new IB vote */
+		norm_ab = (unsigned int)priv->bus.ram_time /
+			(unsigned int) priv->bus.total_time;
+		if (norm_ab > norm_ab_max)
+			norm_ab_max = norm_ab;
+		bus_profile->percent_ab = (100 * norm_ab) / norm_ab_max;
+	}
+
+	priv->bus.total_time = 0;
+	priv->bus.gpu_time = 0;
+	priv->bus.ram_time = 0;
+	priv->bus.ram_wait = 0;
+
+	return result;
+}
+
+static int gpubw_start(struct devfreq *devfreq)
+{
+	struct devfreq_msm_adreno_tz_data *priv;
+
+	struct msm_busmon_extended_profile *bus_profile = container_of(
+					(devfreq->profile),
+					struct msm_busmon_extended_profile,
+					profile);
+	unsigned int t1, t2 = 2 * HIST;
+	int i, bus_size;
+
+	devfreq->data = bus_profile->private_data;
+	priv = devfreq->data;
+
+	bus_size = sizeof(u32) * priv->bus.num;
+	priv->bus.up = kzalloc(bus_size, GFP_KERNEL);
+	priv->bus.down = kzalloc(bus_size, GFP_KERNEL);
+	priv->bus.p_up = kzalloc(bus_size, GFP_KERNEL);
+	priv->bus.p_down = kzalloc(bus_size, GFP_KERNEL);
+	if (priv->bus.up == NULL || priv->bus.down == NULL ||
+		priv->bus.p_up == NULL || priv->bus.p_down == NULL) {
+		/* free whatever was allocated; the governor won't start */
+		kfree(priv->bus.up);
+		kfree(priv->bus.down);
+		kfree(priv->bus.p_up);
+		kfree(priv->bus.p_down);
+		return -ENOMEM;
+	}
+
+	/* Set up the cut-over percentages for the bus calculation. */
+	for (i = 0; i < priv->bus.num; i++) {
+		t1 = (u32)(100 * priv->bus.ib[i]) /
+				(u32)priv->bus.ib[priv->bus.num - 1];
+		priv->bus.p_up[i] = t1 - HIST;
+		priv->bus.p_down[i] = t2 - 2 * HIST;
+		t2 = t1;
+	}
+	/* Set the upper-most and lower-most bounds correctly. */
+	priv->bus.p_down[0] = 0;
+	if (priv->bus.num > 1)
+		priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
+					priv->bus.p_down[1] : (2 * HIST);
+	if (priv->bus.num >= 1)
+		priv->bus.p_up[priv->bus.num - 1] = 100;
+	_update_cutoff(priv, priv->bus.max);
+
+	return 0;
+}
+
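+/*
+ * Worked example (illustrative numbers only): with HIST = 5 and an ib
+ * table of {100, 200, 400}, the loop above yields t1 = 25, 50, 100, so
+ * p_up = {20, 45, 95} and p_down = {0, 15, 40}; the fixups then pin
+ * p_down[0] to 0, keep p_down[1] = max(15, 2 * HIST) = 15, and force
+ * the top p_up entry to 100.
+ */
+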
+static int gpubw_stop(struct devfreq *devfreq)
+{
+	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+	if (priv) {
+		kfree(priv->bus.up);
+		kfree(priv->bus.down);
+		kfree(priv->bus.p_up);
+		kfree(priv->bus.p_down);
+	}
+	devfreq->data = NULL;
+	return 0;
+}
+
+static int devfreq_gpubw_event_handler(struct devfreq *devfreq,
+				unsigned int event, void *data)
+{
+	int result = 0;
+	unsigned long freq;
+
+	mutex_lock(&devfreq->lock);
+	freq = devfreq->previous_freq;
+	switch (event) {
+	case DEVFREQ_GOV_START:
+		result = gpubw_start(devfreq);
+		break;
+	case DEVFREQ_GOV_STOP:
+		result = gpubw_stop(devfreq);
+		break;
+	case DEVFREQ_GOV_RESUME:
+		/* TODO ..... */
+		/* ret = update_devfreq(devfreq); */
+		break;
+	case DEVFREQ_GOV_SUSPEND:
+		{
+			struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
+
+			/* mirror the NULL check in devfreq_gpubw_get_target() */
+			if (priv) {
+				priv->bus.total_time = 0;
+				priv->bus.gpu_time = 0;
+				priv->bus.ram_time = 0;
+			}
+		}
+		break;
+	default:
+		result = 0;
+		break;
+	}
+	mutex_unlock(&devfreq->lock);
+	return result;
+}
+
+static struct devfreq_governor devfreq_gpubw = {
+	.name = "gpubw_mon",
+	.get_target_freq = devfreq_gpubw_get_target,
+	.event_handler = devfreq_gpubw_event_handler,
+};
+
+static int __init devfreq_gpubw_init(void)
+{
+	return devfreq_add_governor(&devfreq_gpubw);
+}
+subsys_initcall(devfreq_gpubw_init);
+
+static void __exit devfreq_gpubw_exit(void)
+{
+	int ret;
+
+	ret = devfreq_remove_governor(&devfreq_gpubw);
+	if (ret)
+		pr_err("%s: failed remove governor %d\n", __func__, ret);
+
+}
+module_exit(devfreq_gpubw_exit);
+
+MODULE_DESCRIPTION("GPU bus bandwidth voting driver. Uses VBIF counters");
+MODULE_LICENSE("GPL v2");
+
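A usage sketch for the governor above: a device opts in by passing the
"gpubw_mon" name to devfreq_add_device(), and gpubw_start() then pulls its
private data out of the enclosing msm_busmon_extended_profile. The function
and variable names below are illustrative, not part of the patch:

	#include <linux/devfreq.h>
	#include <linux/msm_adreno_devfreq.h>

	static int example_attach_gpubw(struct device *dev,
			struct msm_busmon_extended_profile *bus_profile)
	{
		struct devfreq *df;

		/* the governor is matched by the name registered above */
		df = devfreq_add_device(dev, &bus_profile->profile,
					"gpubw_mon", NULL);

		return PTR_ERR_OR_ZERO(df);
	}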
diff --git a/drivers/edac/kryo3xx_arm64_edac.c b/drivers/edac/kryo3xx_arm64_edac.c
index 7e2aadc..4ac880b 100644
--- a/drivers/edac/kryo3xx_arm64_edac.c
+++ b/drivers/edac/kryo3xx_arm64_edac.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -60,6 +60,13 @@
 	asm volatile("msr s3_0_c5_c4_1, %0" : : "r" (val));
 }
 
+static inline void set_errxmisc_overflow(void)
+{
+	u64 val = 0x7F7F00000000;
+
+	asm volatile("msr s3_0_c5_c5_0, %0" : : "r" (val));
+}
+
 static inline void write_errselr_el1(u64 val)
 {
 	asm volatile("msr s3_0_c5_c3_1, %0" : : "r" (val));
@@ -319,9 +326,7 @@
 
 static irqreturn_t kryo3xx_l1_l2_handler(int irq, void *drvdata)
 {
-	struct erp_drvdata *drv = *(struct erp_drvdata **)(drvdata);
-
-	kryo3xx_check_l1_l2_ecc(drv->edev_ctl);
+	kryo3xx_check_l1_l2_ecc(panic_handler_drvdata->edev_ctl);
 	return IRQ_HANDLED;
 }
 
@@ -334,14 +339,24 @@
 	return IRQ_HANDLED;
 }
 
+static void initialize_registers(void *info)
+{
+	set_errxctlr_el1();
+	set_errxmisc_overflow();
+}
+
 static int kryo3xx_cpu_erp_probe(struct platform_device *pdev)
 {
 	struct device *dev = &pdev->dev;
 	struct erp_drvdata *drv;
 	int rc = 0;
 	int fail = 0;
+	int cpu;
 
-	set_errxctlr_el1();
+	for_each_possible_cpu(cpu)
+		smp_call_function_single(cpu, initialize_registers, NULL, 1);
+
 	drv = devm_kzalloc(dev, sizeof(*drv), GFP_KERNEL);
 
 	if (!drv)
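Note on the hunk above: the ERXCTLR/ERXMISC system registers are banked per
CPU, so probe has to run initialize_registers() on every CPU rather than
once. CPUs brought online after probe would still miss this setup; a hedged
sketch of covering them with a dynamic hotplug state (the state name and
callback are illustrative, not part of this patch):

	#include <linux/cpuhotplug.h>

	static int kryo3xx_erp_online(unsigned int cpu)
	{
		/* AP hotplug callbacks run on the CPU coming online */
		initialize_registers(NULL);
		return 0;
	}

	/* in probe, after the for_each_possible_cpu() loop: */
	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "edac/kryo3xx:online",
			  kryo3xx_erp_online, NULL);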
diff --git a/drivers/extcon/extcon-usb-gpio.c b/drivers/extcon/extcon-usb-gpio.c
index a27d350..2e093c3 100644
--- a/drivers/extcon/extcon-usb-gpio.c
+++ b/drivers/extcon/extcon-usb-gpio.c
@@ -24,7 +24,6 @@
 #include <linux/module.h>
 #include <linux/of_gpio.h>
 #include <linux/platform_device.h>
-#include <linux/pm_wakeirq.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/acpi.h>
@@ -36,7 +35,9 @@
 	struct extcon_dev *edev;
 
 	struct gpio_desc *id_gpiod;
+	struct gpio_desc *vbus_gpiod;
 	int id_irq;
+	int vbus_irq;
 
 	unsigned long debounce_jiffies;
 	struct delayed_work wq_detcable;
@@ -48,31 +49,47 @@
 	EXTCON_NONE,
 };
 
+/*
+ * "USB" = VBUS and "USB-HOST" = !ID, so we have:
+ * Both "USB" and "USB-HOST" can't be set as active at the
+ * same time so if "USB-HOST" is active (i.e. ID is 0)  we keep "USB" inactive
+ * even if VBUS is on.
+ *
+ *  State              |    ID   |   VBUS
+ * ----------------------------------------
+ *  [1] USB            |    H    |    H
+ *  [2] none           |    H    |    L
+ *  [3] USB-HOST       |    L    |    H
+ *  [4] USB-HOST       |    L    |    L
+ *
+ * In case we have only one of these signals:
+ * - VBUS only - we want to distinguish between [1] and [2], so ID is always 1.
+ * - ID only - we want to distinguish between [1] and [4], so VBUS = ID.
+ */
 static void usb_extcon_detect_cable(struct work_struct *work)
 {
-	int id;
+	int id, vbus;
 	struct usb_extcon_info *info = container_of(to_delayed_work(work),
 						    struct usb_extcon_info,
 						    wq_detcable);
 
-	/* check ID and update cable state */
-	id = gpiod_get_value_cansleep(info->id_gpiod);
-	if (id) {
-		/*
-		 * ID = 1 means USB HOST cable detached.
-		 * As we don't have event for USB peripheral cable attached,
-		 * we simulate USB peripheral attach here.
-		 */
+	/* check ID and VBUS and update cable state */
+	id = info->id_gpiod ?
+		gpiod_get_value_cansleep(info->id_gpiod) : 1;
+	vbus = info->vbus_gpiod ?
+		gpiod_get_value_cansleep(info->vbus_gpiod) : id;
+
+	/* at first we clean states which are no longer active */
+	if (id)
 		extcon_set_state_sync(info->edev, EXTCON_USB_HOST, false);
-		extcon_set_state_sync(info->edev, EXTCON_USB, true);
-	} else {
-		/*
-		 * ID = 0 means USB HOST cable attached.
-		 * As we don't have event for USB peripheral cable detached,
-		 * we simulate USB peripheral detach here.
-		 */
+	if (!vbus)
 		extcon_set_state_sync(info->edev, EXTCON_USB, false);
+
+	if (!id) {
 		extcon_set_state_sync(info->edev, EXTCON_USB_HOST, true);
+	} else {
+		if (vbus)
+			extcon_set_state_sync(info->edev, EXTCON_USB, true);
 	}
 }
 
@@ -101,12 +118,21 @@
 		return -ENOMEM;
 
 	info->dev = dev;
-	info->id_gpiod = devm_gpiod_get(&pdev->dev, "id", GPIOD_IN);
-	if (IS_ERR(info->id_gpiod)) {
-		dev_err(dev, "failed to get ID GPIO\n");
-		return PTR_ERR(info->id_gpiod);
+	info->id_gpiod = devm_gpiod_get_optional(&pdev->dev, "id", GPIOD_IN);
+	info->vbus_gpiod = devm_gpiod_get_optional(&pdev->dev, "vbus",
+						   GPIOD_IN);
+
+	if (!info->id_gpiod && !info->vbus_gpiod) {
+		dev_err(dev, "failed to get gpios\n");
+		return -ENODEV;
 	}
 
+	if (IS_ERR(info->id_gpiod))
+		return PTR_ERR(info->id_gpiod);
+
+	if (IS_ERR(info->vbus_gpiod))
+		return PTR_ERR(info->vbus_gpiod);
+
 	info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
 	if (IS_ERR(info->edev)) {
 		dev_err(dev, "failed to allocate extcon device\n");
@@ -119,32 +145,58 @@
 		return ret;
 	}
 
-	ret = gpiod_set_debounce(info->id_gpiod,
-				 USB_GPIO_DEBOUNCE_MS * 1000);
+	if (info->id_gpiod)
+		ret = gpiod_set_debounce(info->id_gpiod,
+					 USB_GPIO_DEBOUNCE_MS * 1000);
+	if (!ret && info->vbus_gpiod)
+		ret = gpiod_set_debounce(info->vbus_gpiod,
+					 USB_GPIO_DEBOUNCE_MS * 1000);
+
 	if (ret < 0)
 		info->debounce_jiffies = msecs_to_jiffies(USB_GPIO_DEBOUNCE_MS);
 
 	INIT_DELAYED_WORK(&info->wq_detcable, usb_extcon_detect_cable);
 
-	info->id_irq = gpiod_to_irq(info->id_gpiod);
-	if (info->id_irq < 0) {
-		dev_err(dev, "failed to get ID IRQ\n");
-		return info->id_irq;
+	if (info->id_gpiod) {
+		info->id_irq = gpiod_to_irq(info->id_gpiod);
+		if (info->id_irq < 0) {
+			dev_err(dev, "failed to get ID IRQ\n");
+			return info->id_irq;
+		}
+
+		ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
+						usb_irq_handler,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING |
+						IRQF_ONESHOT,
+						pdev->name, info);
+		if (ret < 0) {
+			dev_err(dev, "failed to request handler for ID IRQ\n");
+			return ret;
+		}
 	}
 
-	ret = devm_request_threaded_irq(dev, info->id_irq, NULL,
-					usb_irq_handler,
-					IRQF_TRIGGER_RISING |
-					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
-					pdev->name, info);
-	if (ret < 0) {
-		dev_err(dev, "failed to request handler for ID IRQ\n");
-		return ret;
+	if (info->vbus_gpiod) {
+		info->vbus_irq = gpiod_to_irq(info->vbus_gpiod);
+		if (info->vbus_irq < 0) {
+			dev_err(dev, "failed to get VBUS IRQ\n");
+			return info->vbus_irq;
+		}
+
+		ret = devm_request_threaded_irq(dev, info->vbus_irq, NULL,
+						usb_irq_handler,
+						IRQF_TRIGGER_RISING |
+						IRQF_TRIGGER_FALLING |
+						IRQF_ONESHOT,
+						pdev->name, info);
+		if (ret < 0) {
+			dev_err(dev, "failed to request handler for VBUS IRQ\n");
+			return ret;
+		}
 	}
 
 	platform_set_drvdata(pdev, info);
 	device_init_wakeup(dev, true);
-	dev_pm_set_wake_irq(dev, info->id_irq);
 
 	/* Perform initial detection */
 	usb_extcon_detect_cable(&info->wq_detcable.work);
@@ -157,8 +209,6 @@
 	struct usb_extcon_info *info = platform_get_drvdata(pdev);
 
 	cancel_delayed_work_sync(&info->wq_detcable);
-
-	dev_pm_clear_wake_irq(&pdev->dev);
 	device_init_wakeup(&pdev->dev, false);
 
 	return 0;
@@ -170,12 +220,32 @@
 	struct usb_extcon_info *info = dev_get_drvdata(dev);
 	int ret = 0;
 
+	if (device_may_wakeup(dev)) {
+		if (info->id_gpiod) {
+			ret = enable_irq_wake(info->id_irq);
+			if (ret)
+				return ret;
+		}
+		if (info->vbus_gpiod) {
+			ret = enable_irq_wake(info->vbus_irq);
+			if (ret) {
+				if (info->id_gpiod)
+					disable_irq_wake(info->id_irq);
+
+				return ret;
+			}
+		}
+	}
+
 	/*
 	 * We don't want to process any IRQs after this point
 	 * as GPIOs used behind I2C subsystem might not be
 	 * accessible until resume completes. So disable IRQ.
 	 */
-	disable_irq(info->id_irq);
+	if (info->id_gpiod)
+		disable_irq(info->id_irq);
+	if (info->vbus_gpiod)
+		disable_irq(info->vbus_irq);
 
 	return ret;
 }
@@ -185,7 +255,28 @@
 	struct usb_extcon_info *info = dev_get_drvdata(dev);
 	int ret = 0;
 
-	enable_irq(info->id_irq);
+	if (device_may_wakeup(dev)) {
+		if (info->id_gpiod) {
+			ret = disable_irq_wake(info->id_irq);
+			if (ret)
+				return ret;
+		}
+		if (info->vbus_gpiod) {
+			ret = disable_irq_wake(info->vbus_irq);
+			if (ret) {
+				if (info->id_gpiod)
+					enable_irq_wake(info->id_irq);
+
+				return ret;
+			}
+		}
+	}
+
+	if (info->id_gpiod)
+		enable_irq(info->id_irq);
+	if (info->vbus_gpiod)
+		enable_irq(info->vbus_irq);
+
 	if (!device_may_wakeup(dev))
 		queue_delayed_work(system_power_efficient_wq,
 				   &info->wq_detcable, 0);
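A minimal sketch of the state table above as plain logic (the helper name is
illustrative; id and vbus are the raw GPIO levels read in
usb_extcon_detect_cable()):

	static void example_usb_states(int id, int vbus, bool *host, bool *usb)
	{
		*host = !id;		/* "USB-HOST" = !ID */
		*usb = id && vbus;	/* "USB" = VBUS, masked while host */
	}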
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d..8e2eb35 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -210,5 +210,6 @@
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
 source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/qcom/Kconfig"
 
 endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41..b1c1b21 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -26,3 +26,4 @@
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
 obj-$(CONFIG_UEFI_CPER)		+= efi/
+obj-$(CONFIG_MSM_TZ_LOG)	+= qcom/
diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
new file mode 100644
index 0000000..61c7974
--- /dev/null
+++ b/drivers/firmware/qcom/Kconfig
@@ -0,0 +1,7 @@
+config MSM_TZ_LOG
+	tristate "MSM Trust Zone (TZ) Log Driver"
+	depends on DEBUG_FS
+	help
+	  This option enables a driver with a debugfs interface for messages
+	  produced by the secure code (TrustZone). These messages provide
+	  diagnostic information about TZ operation.
diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile
new file mode 100644
index 0000000..635f60c
--- /dev/null
+++ b/drivers/firmware/qcom/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MSM_TZ_LOG) += tz_log.o
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
new file mode 100644
index 0000000..1b51d08
--- /dev/null
+++ b/drivers/firmware/qcom/tz_log.c
@@ -0,0 +1,1209 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+
+/* QSEE_LOG_BUF_SIZE = 32K */
+#define QSEE_LOG_BUF_SIZE 0x8000
+
+
+/* TZ Diagnostic Area legacy version number */
+#define TZBSP_DIAG_MAJOR_VERSION_LEGACY	2
+/*
+ * Preprocessor Definitions and Constants
+ */
+#define TZBSP_MAX_CPU_COUNT 0x08
+/*
+ * Number of VMID Tables
+ */
+#define TZBSP_DIAG_NUM_OF_VMID 16
+/*
+ * VMID Description length
+ */
+#define TZBSP_DIAG_VMID_DESC_LEN 7
+/*
+ * Number of Interrupts
+ */
+#define TZBSP_DIAG_INT_NUM  32
+/*
+ * Length of descriptive name associated with Interrupt
+ */
+#define TZBSP_MAX_INT_DESC 16
+/*
+ * TZ 3.X version info
+ */
+#define QSEE_VERSION_TZ_3_X 0x800000
+/*
+ * TZ 4.X version info
+ */
+#define QSEE_VERSION_TZ_4_X 0x1000000
+
+#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
+#define TZBSP_NONCE_LEN 12
+#define TZBSP_TAG_LEN 16
+
+/*
+ * VMID Table
+ */
+struct tzdbg_vmid_t {
+	uint8_t vmid; /* Virtual Machine Identifier */
+	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN];	/* ASCII Text */
+};
+/*
+ * Boot Info Table
+ */
+struct tzdbg_boot_info_t {
+	uint32_t wb_entry_cnt;	/* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;	/* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;	/* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;	/* Power Collapse exit CPU counter */
+	uint32_t warm_jmp_addr;	/* Last Warmboot Jump Address */
+	uint32_t spare;	/* Reserved for future use. */
+};
+/*
+ * Boot Info Table for 64-bit
+ */
+struct tzdbg_boot_info64_t {
+	uint32_t wb_entry_cnt;  /* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;   /* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;  /* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;   /* Power Collapse exit CPU counter */
+	uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
+	uint32_t psci_exit_cnt;   /* PSCI syscall exit CPU Counter */
+	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
+	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
+};
+/*
+ * Reset Info Table
+ */
+struct tzdbg_reset_info_t {
+	uint32_t reset_type;	/* Reset Reason */
+	uint32_t reset_cnt;	/* Number of resets that occurred, per CPU */
+};
+/*
+ * Interrupt Info Table
+ */
+struct tzdbg_int_t {
+	/*
+	 * Type of Interrupt/exception
+	 */
+	uint16_t int_info;
+	/*
+	 * Availability of the slot
+	 */
+	uint8_t avail;
+	/*
+	 * Reserved for future use
+	 */
+	uint8_t spare;
+	/*
+	 * Interrupt # for IRQ and FIQ
+	 */
+	uint32_t int_num;
+	/*
+	 * ASCII text describing the type of interrupt, e.g.:
+	 * Secure Timer, EBI XPU. This string is always null terminated,
+	 * supporting at most TZBSP_MAX_INT_DESC characters.
+	 * Any additional characters are truncated.
+	 */
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
+};
+
+/*
+ * Interrupt Info Table used in tz version >=4.X
+ */
+struct tzdbg_int_t_tz40 {
+	uint16_t int_info;
+	uint8_t avail;
+	uint8_t spare;
+	uint32_t int_num;
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
+};
+
+/* warm boot reason for cores */
+struct tzbsp_diag_wakeup_info_t {
+	/* Wake source info : APCS_GICC_HPPIR */
+	uint32_t HPPIR;
+	/* Wake source info : APCS_GICC_AHPPIR */
+	uint32_t AHPPIR;
+};
+
+/*
+ * Log ring buffer position
+ */
+struct tzdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+/*
+ * Log ring buffer
+ */
+struct tzdbg_log_t {
+	struct tzdbg_log_pos_t	log_pos;
+	/* open ended array to the end of the 4K IMEM buffer */
+	uint8_t					log_buf[];
+};
+
+/*
+ * Diagnostic Table
+ * Note: This is the reference data structure for tz diagnostic table
+ * supporting TZBSP_MAX_CPU_COUNT, the real diagnostic data is directly
+ * copied into buffer from i/o memory.
+ */
+struct tzdbg_t {
+	uint32_t magic_num;
+	uint32_t version;
+	/*
+	 * Number of CPU's
+	 */
+	uint32_t cpu_count;
+	/*
+	 * Offset of VMID Table
+	 */
+	uint32_t vmid_info_off;
+	/*
+	 * Offset of Boot Table
+	 */
+	uint32_t boot_info_off;
+	/*
+	 * Offset of Reset info Table
+	 */
+	uint32_t reset_info_off;
+	/*
+	 * Offset of Interrupt info Table
+	 */
+	uint32_t int_info_off;
+	/*
+	 * Ring Buffer Offset
+	 */
+	uint32_t ring_off;
+	/*
+	 * Ring Buffer Length
+	 */
+	uint32_t ring_len;
+
+	/* Offset for Wakeup info */
+	uint32_t wakeup_info_off;
+
+	/*
+	 * VMID to EE Mapping
+	 */
+	struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
+	/*
+	 * Boot Info
+	 */
+	struct tzdbg_boot_info_t  boot_info[TZBSP_MAX_CPU_COUNT];
+	/*
+	 * Reset Info
+	 */
+	struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
+	uint32_t num_interrupts;
+	struct tzdbg_int_t  int_info[TZBSP_DIAG_INT_NUM];
+
+	/* Wake up info */
+	struct tzbsp_diag_wakeup_info_t  wakeup_info[TZBSP_MAX_CPU_COUNT];
+
+	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+
+	uint8_t nonce[TZBSP_NONCE_LEN];
+
+	uint8_t tag[TZBSP_TAG_LEN];
+
+	/*
+	 * We need at least 2K for the ring buffer
+	 */
+	struct tzdbg_log_t ring_buffer;	/* TZ Ring Buffer */
+};
+
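+/*
+ * Illustrative note: readers locate the tables through the *_off fields
+ * rather than through the struct members above, since the layout written
+ * by the secure side may not match this reference structure, e.g.:
+ *
+ *	ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
+ *				tzdbg.diag_buf->vmid_info_off);
+ */
+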
+struct hypdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+struct hypdbg_boot_info_t {
+	uint32_t warm_entry_cnt;
+	uint32_t warm_exit_cnt;
+};
+
+struct hypdbg_t {
+	/* Magic Number */
+	uint32_t magic_num;
+
+	/* Number of CPU's */
+	uint32_t cpu_count;
+
+	/* Ring Buffer Offset */
+	uint32_t ring_off;
+
+	/* Ring buffer position mgmt */
+	struct hypdbg_log_pos_t log_pos;
+	uint32_t log_len;
+
+	/* S2 fault numbers */
+	uint32_t s2_fault_counter;
+
+	/* Boot Info */
+	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
+
+	/* Ring buffer pointer */
+	uint8_t log_buf_p[];
+};
+
+/*
+ * Enumeration order for VMID's
+ */
+enum tzdbg_stats_type {
+	TZDBG_BOOT = 0,
+	TZDBG_RESET,
+	TZDBG_INTERRUPT,
+	TZDBG_VMID,
+	TZDBG_GENERAL,
+	TZDBG_LOG,
+	TZDBG_QSEE_LOG,
+	TZDBG_HYP_GENERAL,
+	TZDBG_HYP_LOG,
+	TZDBG_STATS_MAX
+};
+
+struct tzdbg_stat {
+	char *name;
+	char *data;
+};
+
+struct tzdbg {
+	void __iomem *virt_iobase;
+	void __iomem *hyp_virt_iobase;
+	struct tzdbg_t *diag_buf;
+	struct hypdbg_t *hyp_diag_buf;
+	char *disp_buf;
+	int debug_tz[TZDBG_STATS_MAX];
+	struct tzdbg_stat stat[TZDBG_STATS_MAX];
+	uint32_t hyp_debug_rw_buf_size;
+	bool is_hyplog_enabled;
+	uint32_t tz_version;
+};
+
+static struct tzdbg tzdbg = {
+	.stat[TZDBG_BOOT].name = "boot",
+	.stat[TZDBG_RESET].name = "reset",
+	.stat[TZDBG_INTERRUPT].name = "interrupt",
+	.stat[TZDBG_VMID].name = "vmid",
+	.stat[TZDBG_GENERAL].name = "general",
+	.stat[TZDBG_LOG].name = "log",
+	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
+	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
+	.stat[TZDBG_HYP_LOG].name = "hyp_log",
+};
+
+static struct tzdbg_log_t *g_qsee_log;
+static uint32_t debug_rw_buf_size;
+
+/*
+ * Debugfs data structure and functions
+ */
+
+static int _disp_tz_general_stats(void)
+{
+	int len = 0;
+
+	len += snprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
+			"   Version        : 0x%x\n"
+			"   Magic Number   : 0x%x\n"
+			"   Number of CPU  : %d\n",
+			tzdbg.diag_buf->version,
+			tzdbg.diag_buf->magic_num,
+			tzdbg.diag_buf->cpu_count);
+	tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_vmid_stats(void)
+{
+	int i, num_vmid;
+	int len = 0;
+	struct tzdbg_vmid_t *ptr;
+
+	ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->vmid_info_off);
+	num_vmid = ((tzdbg.diag_buf->boot_info_off -
+				tzdbg.diag_buf->vmid_info_off)/
+					(sizeof(struct tzdbg_vmid_t)));
+
+	for (i = 0; i < num_vmid; i++) {
+		if (ptr->vmid < 0xFF) {
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"   0x%x        %s\n",
+				(uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
+		}
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_boot_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_boot_info_t *ptr = NULL;
+	struct tzdbg_boot_info64_t *ptr_64 = NULL;
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	} else {
+		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	}
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address : 0x%llx\n"
+					"     Warmboot entry CPU counter : 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter : 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n"
+					"     Psci entry CPU counter : 0x%x\n"
+					"     Psci exit CPU counter : 0x%x\n"
+					"     Warmboot Jump Address Instruction : 0x%x\n",
+					i, (uint64_t)ptr_64->warm_jmp_addr,
+					ptr_64->wb_entry_cnt,
+					ptr_64->wb_exit_cnt,
+					ptr_64->pc_entry_cnt,
+					ptr_64->pc_exit_cnt,
+					ptr_64->psci_entry_cnt,
+					ptr_64->psci_exit_cnt,
+					ptr_64->warm_jmp_instr);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr_64++;
+		} else {
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address     : 0x%x\n"
+					"     Warmboot entry CPU counter: 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter: 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n",
+					i, ptr->warm_jmp_addr,
+					ptr->wb_entry_cnt,
+					ptr->wb_exit_cnt,
+					ptr->pc_entry_cnt,
+					ptr->pc_exit_cnt);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr++;
+		}
+	}
+	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_reset_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_reset_info_t *ptr;
+
+	ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->reset_info_off);
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Reset Type (reason)       : 0x%x\n"
+				"     Reset counter             : 0x%x\n",
+				i, ptr->reset_type, ptr->reset_cnt);
+
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+
+		ptr++;
+	}
+	tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_interrupt_stats(void)
+{
+	int i, j, int_info_size;
+	int len = 0;
+	int *num_int;
+	unsigned char *ptr;
+	struct tzdbg_int_t *tzdbg_ptr;
+	struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
+
+	num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
+			(tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
+	ptr = ((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->int_info_off);
+	int_info_size = ((tzdbg.diag_buf->ring_off -
+				tzdbg.diag_buf->int_info_off)/(*num_int));
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+
+	if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
+		for (i = 0; i < (*num_int); i++) {
+			tzdbg_ptr = (struct tzdbg_int_t *)ptr;
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr->int_num,
+				(uint32_t)tzdbg_ptr->int_info,
+				(uint8_t *)tzdbg_ptr->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				(uint32_t)j,
+				(uint32_t)tzdbg_ptr->int_count[j]);
+			}
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			ptr += int_info_size;
+		}
+	} else {
+		for (i = 0; i < (*num_int); i++) {
+			tzdbg_ptr_tz40 = (struct tzdbg_int_t_tz40 *)ptr;
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr_tz40->int_num,
+				(uint32_t)tzdbg_ptr_tz40->int_info,
+				(uint8_t *)tzdbg_ptr_tz40->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				(uint32_t)j,
+				(uint32_t)tzdbg_ptr_tz40->int_count[j]);
+			}
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			ptr += int_info_size;
+		}
+	}
+
+	tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats_legacy(void)
+{
+	int len = 0;
+	unsigned char *ptr;
+
+	ptr = (unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->ring_off;
+	len += snprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
+							"%s\n", ptr);
+
+	tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_log_stats(struct tzdbg_log_t *log,
+			struct tzdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = log->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start) {
+		wrap_cnt = wrap_end - wrap_start;
+	} else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/* end position has wrapped around more than once, */
+		/* current start no longer valid                   */
+		log_start->wrap = log->log_pos.wrap - 1;
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(log->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	}
+
+	while (log_start->offset == log->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		unsigned long t = msleep_interruptible(50);
+
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		if (buf_idx == TZDBG_LOG)
+			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	}
+
+	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
+
+	/*
+	 *  Read from ring buff while there is data and space in return buff
+	 */
+	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
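+/*
+ * Worked example (illustrative): with log_len = 100, a reader that last
+ * stopped at {wrap = 3, offset = 40} while the writer is at
+ * {wrap = 4, offset = 60} gets wrap_cnt = 1 with offset 60 > 40, meaning
+ * the unread bytes from offset 40 onward were overwritten; the start is
+ * therefore moved to (60 + 1) % 100 = 61, the oldest byte still present.
+ */
+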
+static int __disp_hyp_log_stats(uint8_t *log,
+			struct hypdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
+	unsigned long t = 0;
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = hyp->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start) {
+		wrap_cnt = wrap_end - wrap_start;
+	} else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/* end position has wrapped around more than once, */
+		/* current start no longer valid                   */
+		log_start->wrap = hyp->log_pos.wrap - 1;
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(hyp->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	}
+
+	while (log_start->offset == hyp->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		t = msleep_interruptible(50);
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		/* TZDBG_HYP_LOG */
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+						tzdbg.hyp_debug_rw_buf_size);
+	}
+
+	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
+				tzdbg.hyp_debug_rw_buf_size : count;
+
+	/*
+	 *  Read from ring buff while there is data and space in return buff
+	 */
+	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_t log_start = {0};
+	struct tzdbg_log_t *log_ptr;
+
+	log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
+				tzdbg.diag_buf->ring_off -
+				offsetof(struct tzdbg_log_t, log_buf));
+
+	return _disp_log_stats(log_ptr, &log_start,
+				tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
+}
+
+static int _disp_hyp_log_stats(size_t count)
+{
+	static struct hypdbg_log_pos_t log_start = {0};
+	uint8_t *log_ptr;
+
+	log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
+				tzdbg.hyp_diag_buf->ring_off);
+
+	return __disp_hyp_log_stats(log_ptr, &log_start,
+			tzdbg.hyp_debug_rw_buf_size, count, TZDBG_HYP_LOG);
+}
+
+static int _disp_qsee_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_t log_start = {0};
+
+	return _disp_log_stats(g_qsee_log, &log_start,
+			QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
+			count, TZDBG_QSEE_LOG);
+}
+
+static int _disp_hyp_general_stats(size_t count)
+{
+	int len = 0;
+	int i;
+	struct hypdbg_boot_info_t *ptr = NULL;
+
+	len += snprintf(tzdbg.disp_buf + len,
+			tzdbg.hyp_debug_rw_buf_size - 1,
+			"   Magic Number    : 0x%x\n"
+			"   CPU Count       : 0x%x\n"
+			"   S2 Fault Counter: 0x%x\n",
+			tzdbg.hyp_diag_buf->magic_num,
+			tzdbg.hyp_diag_buf->cpu_count,
+			tzdbg.hyp_diag_buf->s2_fault_counter);
+
+	ptr = tzdbg.hyp_diag_buf->boot_info;
+	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
+		len += snprintf(tzdbg.disp_buf + len,
+				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Warmboot entry CPU counter: 0x%x\n"
+				"     Warmboot exit CPU counter : 0x%x\n",
+				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);
+
+		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_HYP_GENERAL].data = tzdbg.disp_buf;
+	return len;
+}
+
+static ssize_t tzdbgfs_read(struct file *file, char __user *buf,
+	size_t count, loff_t *offp)
+{
+	int len = 0;
+	int *tz_id =  file->private_data;
+
+	if (*tz_id == TZDBG_BOOT || *tz_id == TZDBG_RESET ||
+		*tz_id == TZDBG_INTERRUPT || *tz_id == TZDBG_GENERAL ||
+		*tz_id == TZDBG_VMID || *tz_id == TZDBG_LOG)
+		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	if (*tz_id == TZDBG_HYP_GENERAL || *tz_id == TZDBG_HYP_LOG)
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+					tzdbg.hyp_debug_rw_buf_size);
+
+	switch (*tz_id) {
+	case TZDBG_BOOT:
+		len = _disp_tz_boot_stats();
+		break;
+	case TZDBG_RESET:
+		len = _disp_tz_reset_stats();
+		break;
+	case TZDBG_INTERRUPT:
+		len = _disp_tz_interrupt_stats();
+		break;
+	case TZDBG_GENERAL:
+		len = _disp_tz_general_stats();
+		break;
+	case TZDBG_VMID:
+		len = _disp_tz_vmid_stats();
+		break;
+	case TZDBG_LOG:
+		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
+				(tzdbg.diag_buf->version >> 16)) {
+			len = _disp_tz_log_stats(count);
+			*offp = 0;
+		} else {
+			len = _disp_tz_log_stats_legacy();
+		}
+		break;
+	case TZDBG_QSEE_LOG:
+		len = _disp_qsee_log_stats(count);
+		*offp = 0;
+		break;
+	case TZDBG_HYP_GENERAL:
+		len = _disp_hyp_general_stats(count);
+		break;
+	case TZDBG_HYP_LOG:
+		len = _disp_hyp_log_stats(count);
+		*offp = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (len > count)
+		len = count;
+
+	return simple_read_from_buffer(buf, len, offp,
+				tzdbg.stat[(*tz_id)].data, len);
+}
+
+static int tzdbgfs_open(struct inode *inode, struct file *pfile)
+{
+	pfile->private_data = inode->i_private;
+	return 0;
+}
+
+static const struct file_operations tzdbg_fops = {
+	.owner   = THIS_MODULE,
+	.read    = tzdbgfs_read,
+	.open    = tzdbgfs_open,
+};
+
+static struct ion_client  *g_ion_clnt;
+static struct ion_handle *g_ihandle;
+
+/*
+ * Allocates log buffer from ION, registers the buffer at TZ
+ */
+static void tzdbg_register_qsee_log_buf(void)
+{
+	/* register log buffer scm request */
+	struct qseecom_reg_log_buf_ireq req;
+
+	/* scm response */
+	struct qseecom_command_scm_resp resp = {};
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	int ret = 0;
+
+	/* Create ION msm client */
+	g_ion_clnt = msm_ion_client_create("qsee_log");
+	if (g_ion_clnt == NULL) {
+		pr_err("%s: Ion client cannot be created\n", __func__);
+		return;
+	}
+
+	g_ihandle = ion_alloc(g_ion_clnt, QSEE_LOG_BUF_SIZE,
+			4096, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(g_ihandle)) {
+		pr_err("%s: Ion client could not retrieve the handle\n",
+			__func__);
+		goto err1;
+	}
+
+	ret = ion_phys(g_ion_clnt, g_ihandle, &pa, &len);
+	if (ret) {
+		pr_err("%s: Ion conversion to physical address failed\n",
+			__func__);
+		goto err2;
+	}
+
+	req.qsee_cmd_id = QSEOS_REGISTER_LOG_BUF_COMMAND;
+	req.phy_addr = (uint32_t)pa;
+	req.len = len;
+
+	if (!is_scm_armv8()) {
+		/*  SCM_CALL  to register the log buffer */
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req, sizeof(req),
+			&resp, sizeof(resp));
+	} else {
+		struct scm_desc desc = {0};
+
+		desc.args[0] = pa;
+		desc.args[1] = len;
+		desc.arginfo = 0x22;
+		ret = scm_call2(SCM_QSEEOS_FNID(1, 6), &desc);
+		resp.result = desc.ret[0];
+	}
+
+	if (ret) {
+		pr_err("%s: scm_call to register log buffer failed\n",
+			__func__);
+		goto err2;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err(
+		"%s: scm_call to register log buf failed, resp result =%d\n",
+		__func__, resp.result);
+		goto err2;
+	}
+
+	g_qsee_log =
+		(struct tzdbg_log_t *)ion_map_kernel(g_ion_clnt, g_ihandle);
+
+	if (IS_ERR(g_qsee_log)) {
+		pr_err("%s: Couldn't map ion buffer to kernel\n",
+			__func__);
+		goto err2;
+	}
+
+	g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
+	return;
+
+err2:
+	ion_free(g_ion_clnt, g_ihandle);
+	g_ihandle = NULL;
+err1:
+	ion_client_destroy(g_ion_clnt);
+	g_ion_clnt = NULL;
+}
+
+static int  tzdbgfs_init(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+	struct dentry           *dent_dir;
+	struct dentry           *dent;
+
+	dent_dir = debugfs_create_dir("tzdbg", NULL);
+	if (dent_dir == NULL) {
+		dev_err(&pdev->dev, "tzdbg debugfs_create_dir failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TZDBG_STATS_MAX; i++) {
+		tzdbg.debug_tz[i] = i;
+		dent = debugfs_create_file(tzdbg.stat[i].name,
+				0444, dent_dir,
+				&tzdbg.debug_tz[i], &tzdbg_fops);
+		if (dent == NULL) {
+			dev_err(&pdev->dev, "TZ debugfs_create_file failed\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	tzdbg.disp_buf = kzalloc(max(debug_rw_buf_size,
+			tzdbg.hyp_debug_rw_buf_size), GFP_KERNEL);
+	if (tzdbg.disp_buf == NULL) {
+		rc = -ENOMEM;
+		goto err;
+	}
+	platform_set_drvdata(pdev, dent_dir);
+	return 0;
+err:
+	debugfs_remove_recursive(dent_dir);
+
+	return rc;
+}
+
+static void tzdbgfs_exit(struct platform_device *pdev)
+{
+	struct dentry           *dent_dir;
+
+	kzfree(tzdbg.disp_buf);
+	dent_dir = platform_get_drvdata(pdev);
+	debugfs_remove_recursive(dent_dir);
+	if (g_ion_clnt != NULL) {
+		if (!IS_ERR_OR_NULL(g_ihandle)) {
+			ion_unmap_kernel(g_ion_clnt, g_ihandle);
+			ion_free(g_ion_clnt, g_ihandle);
+		}
+		ion_client_destroy(g_ion_clnt);
+	}
+}
+
+static int __update_hypdbg_base(struct platform_device *pdev,
+			void __iomem *virt_iobase)
+{
+	phys_addr_t hypdiag_phy_iobase;
+	uint32_t hyp_address_offset;
+	uint32_t hyp_size_offset;
+	struct hypdbg_t *hyp;
+	uint32_t *ptr = NULL;
+
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
+							&hyp_address_offset)) {
+		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
+							&hyp_size_offset)) {
+		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
+		return -EINVAL;
+	}
+
+	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
+	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
+					hyp_size_offset);
+
+	tzdbg.hyp_virt_iobase = devm_ioremap_nocache(&pdev->dev,
+					hypdiag_phy_iobase,
+					tzdbg.hyp_debug_rw_buf_size);
+	if (!tzdbg.hyp_virt_iobase) {
+		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pr, len=%u\n",
+			&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
+	hyp = tzdbg.hyp_diag_buf;
+	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
+	return 0;
+}
+
+static void tzdbg_get_tz_version(void)
+{
+	uint32_t smc_id = 0;
+	uint32_t feature = 10;
+	struct qseecom_command_scm_resp resp = {0};
+	struct scm_desc desc = {0};
+	int ret = 0;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_INFO, SCM_SVC_UTIL,  &feature,
+					sizeof(feature), &resp, sizeof(resp));
+	} else {
+		smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+		desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+		desc.args[0] = feature;
+		ret = scm_call2(smc_id, &desc);
+		resp.result = desc.ret[0];
+	}
+
+	if (ret)
+		pr_err("%s: scm_call to get tz version failed\n",
+				__func__);
+	else
+		tzdbg.tz_version = resp.result;
+
+}
+
+/*
+ * Driver functions
+ */
+static int tz_log_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	void __iomem *virt_iobase;
+	phys_addr_t tzdiag_phy_iobase;
+	uint32_t *ptr = NULL;
+	int ret = 0;
+
+	/*
+	 * Get the address that stores the physical location of the diagnostic data
+	 */
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		dev_err(&pdev->dev,
+				"%s: ERROR Missing MEM resource\n", __func__);
+		return -ENXIO;
+	}
+
+	/*
+	 * Get the debug buffer size
+	 */
+	debug_rw_buf_size = resource_size(resource);
+
+	/*
+	 * Map the address that stores the physical location of the diagnostic data
+	 */
+	virt_iobase = devm_ioremap_nocache(&pdev->dev, resource->start,
+				debug_rw_buf_size);
+	if (!virt_iobase) {
+		dev_err(&pdev->dev,
+			"%s: ERROR could not ioremap: start=%pr, len=%u\n",
+			__func__, &resource->start,
+			(unsigned int)(debug_rw_buf_size));
+		return -ENXIO;
+	}
+
+	if (pdev->dev.of_node) {
+		tzdbg.is_hyplog_enabled = of_property_read_bool(
+			pdev->dev.of_node, "qcom,hyplog-enabled");
+		if (tzdbg.is_hyplog_enabled) {
+			ret = __update_hypdbg_base(pdev, virt_iobase);
+			if (ret) {
+				dev_err(&pdev->dev, "%s() failed to get device tree data ret = %d\n",
+						__func__, ret);
+				return -EINVAL;
+			}
+		} else {
+			dev_info(&pdev->dev, "Hyp log service is not supported\n");
+		}
+	} else {
+		dev_dbg(&pdev->dev, "Device tree data is not found\n");
+	}
+
+	/*
+	 * Retrieve the address of diagnostic data
+	 */
+	tzdiag_phy_iobase = readl_relaxed(virt_iobase);
+
+	/*
+	 * Map the diagnostic information area
+	 */
+	tzdbg.virt_iobase = devm_ioremap_nocache(&pdev->dev,
+				tzdiag_phy_iobase, debug_rw_buf_size);
+
+	if (!tzdbg.virt_iobase) {
+		dev_err(&pdev->dev,
+			"%s: ERROR could not ioremap: start=%pr, len=%u\n",
+			__func__, &tzdiag_phy_iobase,
+			debug_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
+	if (ptr == NULL)
+		return -ENOMEM;
+
+	tzdbg.diag_buf = (struct tzdbg_t *)ptr;
+
+	ret = tzdbgfs_init(pdev);
+	if (ret)
+		goto err;
+
+	tzdbg_register_qsee_log_buf();
+
+	tzdbg_get_tz_version();
+
+	return 0;
+err:
+	kfree(tzdbg.diag_buf);
+	return ret;
+}
+
+static int tz_log_remove(struct platform_device *pdev)
+{
+	kzfree(tzdbg.diag_buf);
+	kzfree(tzdbg.hyp_diag_buf);	/* kzfree() handles NULL */
+	tzdbgfs_exit(pdev);
+
+	return 0;
+}
+
+static const struct of_device_id tzlog_match[] = {
+	{	.compatible = "qcom,tz-log",
+	},
+	{}
+};
+
+static struct platform_driver tz_log_driver = {
+	.probe		= tz_log_probe,
+	.remove		= tz_log_remove,
+	.driver		= {
+		.name = "tz_log",
+		.owner = THIS_MODULE,
+		.of_match_table = tzlog_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+static int __init tz_log_init(void)
+{
+	return platform_driver_register(&tz_log_driver);
+}
+
+static void __exit tz_log_exit(void)
+{
+	platform_driver_unregister(&tz_log_driver);
+}
+
+module_init(tz_log_init);
+module_exit(tz_log_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TZ Log driver");
+MODULE_ALIAS("platform:tz_log");
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index b1254f8..b87d278 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1299,6 +1299,8 @@
 		goto out_pm_put;
 	}
 
+	mutex_lock(&gpu->lock);
+
 	fence = etnaviv_gpu_fence_alloc(gpu);
 	if (!fence) {
 		event_free(gpu, event);
@@ -1306,8 +1308,6 @@
 		goto out_pm_put;
 	}
 
-	mutex_lock(&gpu->lock);
-
 	gpu->event[event].fence = fence;
 	submit->fence = fence->seqno;
 	gpu->active_fence = submit->fence;
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
index 2fcf10ba..cc87775 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_manager.c
@@ -503,7 +503,7 @@
 			goto error_disable_master;
 		}
 	}
-
+	return rc;
+
 error_disable_master:
 	(void)dsi_core_clk_stop(m_clks);
 
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9d2e95b..ced015f 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -305,6 +305,7 @@
 	}
 
 	sde_dbg_destroy();
+	debugfs_remove_recursive(priv->debug_root);
 
 	component_unbind_all(dev, ddev);
 	sde_power_client_destroy(&priv->phandle, priv->pclient);
@@ -611,7 +612,16 @@
 	if (ret)
 		goto fail;
 
-	ret = sde_dbg_debugfs_register(ddev->primary->debugfs_root);
+	priv->debug_root = debugfs_create_dir("debug",
+					ddev->primary->debugfs_root);
+	if (IS_ERR_OR_NULL(priv->debug_root)) {
+		pr_err("debugfs_root create_dir fail, error %ld\n",
+		       PTR_ERR(priv->debug_root));
+		priv->debug_root = NULL;
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	ret = sde_dbg_debugfs_register(priv->debug_root);
 	if (ret) {
 		dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret);
 		goto fail;
@@ -1696,6 +1706,13 @@
 	{ },
 };
 
+#ifdef CONFIG_QCOM_KGSL
+static int add_gpu_components(struct device *dev,
+			      struct component_match **matchptr)
+{
+	return 0;
+}
+#else
 static int add_gpu_components(struct device *dev,
 			      struct component_match **matchptr)
 {
@@ -1711,6 +1728,7 @@
 
 	return 0;
 }
+#endif
 
 static int msm_drm_bind(struct device *dev)
 {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index f2fccd7..da76fbc 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -492,6 +492,9 @@
 
 	/* whether registered and drm_dev_unregister should be called */
 	bool registered;
+
+	/* msm drv debug root node */
+	struct dentry *debug_root;
 };
 
 struct msm_format {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 27b3df7..54acf41a 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -117,6 +117,374 @@
 									enable);
 }
 
+/**
+ * _sde_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_sde_crtc_rp_to_crtc(struct sde_crtc_respool *rp)
+{
+	if (!rp)
+		return NULL;
+
+	return container_of(rp, struct sde_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _sde_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _sde_crtc_rp_reclaim(struct sde_crtc_respool *rp, bool force)
+{
+	struct sde_crtc_res *res, *next;
+	struct drm_crtc *crtc;
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	SDE_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+			force ? "destroy" : "free_unused");
+
+	list_for_each_entry_safe(res, next, &rp->res_list, list) {
+		if (!force && !(res->flags & SDE_CRTC_RES_FLAG_FREE))
+			continue;
+		SDE_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		list_del(&res->list);
+		if (res->ops.put)
+			res->ops.put(res->val);
+		kfree(res);
+	}
+}
+
+/**
+ * _sde_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _sde_crtc_rp_free_unused(struct sde_crtc_respool *rp)
+{
+	_sde_crtc_rp_reclaim(rp, false);
+}
+
+/**
+ * _sde_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_destroy(struct sde_crtc_respool *rp)
+{
+	_sde_crtc_rp_reclaim(rp, true);
+}
+
+/**
+ * _sde_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_sde_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+	SDE_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+	return sde_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _sde_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _sde_crtc_hw_blk_put(void *val)
+{
+	SDE_DEBUG("res://%pK\n", val);
+	sde_hw_blk_put(val);
+}
+
+/**
+ * _sde_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_duplicate(struct sde_crtc_respool *rp,
+		struct sde_crtc_respool *dup_rp)
+{
+	struct sde_crtc_res *res, *dup_res;
+	struct drm_crtc *crtc;
+
+	if (!rp || !dup_rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	SDE_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+	dup_rp->sequence_id = rp->sequence_id + 1;
+	INIT_LIST_HEAD(&dup_rp->res_list);
+	dup_rp->ops = rp->ops;
+	list_for_each_entry(res, &rp->res_list, list) {
+		dup_res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
+		if (!dup_res)
+			return;
+		INIT_LIST_HEAD(&dup_res->list);
+		atomic_set(&dup_res->refcount, 0);
+		dup_res->type = res->type;
+		dup_res->tag = res->tag;
+		dup_res->val = res->val;
+		dup_res->ops = res->ops;
+		dup_res->flags = SDE_CRTC_RES_FLAG_FREE;
+		SDE_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, dup_rp->sequence_id,
+				dup_res->type, dup_res->tag, dup_res->val,
+				atomic_read(&dup_res->refcount));
+		list_add_tail(&dup_res->list, &dup_rp->res_list);
+		if (dup_res->ops.get)
+			dup_res->ops.get(dup_res->val, 0, -1);
+	}
+}
+
+/**
+ * _sde_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * return: None
+ */
+static void _sde_crtc_rp_reset(struct sde_crtc_respool *rp)
+{
+	if (!rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	rp->sequence_id = 0;
+	INIT_LIST_HEAD(&rp->res_list);
+	rp->ops.get = _sde_crtc_hw_blk_get;
+	rp->ops.put = _sde_crtc_hw_blk_put;
+}
+
+/**
+ * _sde_crtc_rp_add - add given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+static int _sde_crtc_rp_add(struct sde_crtc_respool *rp, u32 type, u64 tag,
+		void *val, struct sde_crtc_res_ops *ops)
+{
+	struct sde_crtc_res *res;
+	struct drm_crtc *crtc;
+
+	if (!rp || !ops) {
+		SDE_ERROR("invalid resource pool/ops\n");
+		return -EINVAL;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		SDE_ERROR("crtc%d.%u already exist res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		return -EEXIST;
+	}
+	res = kzalloc(sizeof(struct sde_crtc_res), GFP_KERNEL);
+	if (!res)
+		return -ENOMEM;
+	INIT_LIST_HEAD(&res->list);
+	atomic_set(&res->refcount, 1);
+	res->type = type;
+	res->tag = tag;
+	res->val = val;
+	res->ops = *ops;
+	list_add_tail(&res->list, &rp->res_list);
+	SDE_DEBUG("crtc%d.%u added res:0x%x/0x%llx\n",
+			crtc->base.id, rp->sequence_id, type, tag);
+	return 0;
+}
+
+/**
+ * _sde_crtc_rp_get - lookup the resource from given resource pool and obtain
+ *	if available; otherwise, obtain resource from global pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag:  Search tag for given resource
+ * return: Resource handle if success; pointer error or null otherwise
+ */
+static void *_sde_crtc_rp_get(struct sde_crtc_respool *rp, u32 type, u64 tag)
+{
+	struct sde_crtc_res *res;
+	void *val = NULL;
+	int rc;
+	struct drm_crtc *crtc;
+
+	if (!rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return NULL;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return NULL;
+	}
+
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		atomic_inc(&res->refcount);
+		res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
+		return res->val;
+	}
+	list_for_each_entry(res, &rp->res_list, list) {
+		if (res->type != type || !(res->flags & SDE_CRTC_RES_FLAG_FREE))
+			continue;
+		SDE_DEBUG("crtc%d.%u retag res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		atomic_inc(&res->refcount);
+		res->tag = tag;
+		res->flags &= ~SDE_CRTC_RES_FLAG_FREE;
+		return res->val;
+	}
+	if (rp->ops.get)
+		val = rp->ops.get(NULL, type, -1);
+	if (IS_ERR_OR_NULL(val)) {
+		SDE_ERROR("crtc%d.%u failed to get res:0x%x//\n",
+				crtc->base.id, rp->sequence_id, type);
+		return NULL;
+	}
+	rc = _sde_crtc_rp_add(rp, type, tag, val, &rp->ops);
+	if (rc) {
+		SDE_ERROR("crtc%d.%u failed to add res:0x%x/0x%llx\n",
+				crtc->base.id, rp->sequence_id, type, tag);
+		if (rp->ops.put)
+			rp->ops.put(val);
+		val = NULL;
+	}
+	return val;
+}
+
+/**
+ * _sde_crtc_rp_put - return given resource to resource pool
+ * @rp: Pointer to original resource pool
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+static void _sde_crtc_rp_put(struct sde_crtc_respool *rp, u32 type, u64 tag)
+{
+	struct sde_crtc_res *res, *next;
+	struct drm_crtc *crtc;
+
+	if (!rp) {
+		SDE_ERROR("invalid resource pool\n");
+		return;
+	}
+
+	crtc = _sde_crtc_rp_to_crtc(rp);
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	list_for_each_entry_safe(res, next, &rp->res_list, list) {
+		if (res->type != type || res->tag != tag)
+			continue;
+		SDE_DEBUG("crtc%d.%u found res:0x%x/0x%llx/%pK/%d\n",
+				crtc->base.id, rp->sequence_id,
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+		if (res->flags & SDE_CRTC_RES_FLAG_FREE)
+			SDE_ERROR(
+				"crtc%d.%u already free res:0x%x/0x%llx/%pK/%d\n",
+					crtc->base.id, rp->sequence_id,
+					res->type, res->tag, res->val,
+					atomic_read(&res->refcount));
+		else if (atomic_dec_return(&res->refcount) == 0)
+			res->flags |= SDE_CRTC_RES_FLAG_FREE;
+
+		return;
+	}
+	SDE_ERROR("crtc%d.%u not found res:0x%x/0x%llx\n",
+			crtc->base.id, rp->sequence_id, type, tag);
+}
+
+int sde_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+		void *val, struct sde_crtc_res_ops *ops)
+{
+	struct sde_crtc_respool *rp;
+
+	if (!state) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rp = &to_sde_crtc_state(state)->rp;
+	return _sde_crtc_rp_add(rp, type, tag, val, ops);
+}
+
+void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+	struct sde_crtc_respool *rp;
+	void *val;
+
+	if (!state) {
+		SDE_ERROR("invalid parameters\n");
+		return NULL;
+	}
+
+	rp = &to_sde_crtc_state(state)->rp;
+	val = _sde_crtc_rp_get(rp, type, tag);
+	if (IS_ERR(val)) {
+		SDE_ERROR("failed to get res type:0x%x:0x%llx\n",
+				type, tag);
+		return NULL;
+	}
+
+	return val;
+}
+
+void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag)
+{
+	struct sde_crtc_respool *rp;
+
+	if (!state) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	rp = &to_sde_crtc_state(state)->rp;
+	_sde_crtc_rp_put(rp, type, tag);
+}
+
 static void _sde_crtc_deinit_events(struct sde_crtc *sde_crtc)
 {
 	if (!sde_crtc)
@@ -224,8 +592,9 @@
 
 	lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
 						bg_alpha, blend_op);
-	SDE_DEBUG("format 0x%x, alpha_enable %u fg alpha:0x%x bg alpha:0x%x \"\
-		 blend_op:0x%x\n", format->base.pixel_format,
+	SDE_DEBUG(
+		"format: %4.4s, alpha_enable %u fg alpha:0x%x bg alpha:0x%x blend_op:0x%x\n",
+		(char *) &format->base.pixel_format,
 		format->alpha_enable, fg_alpha, bg_alpha, blend_op);
 }
 
@@ -295,6 +664,8 @@
 	struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
 {
 	struct drm_plane *plane;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *state;
 	struct sde_crtc_state *cstate;
 	struct sde_plane_state *pstate = NULL;
 	struct sde_format *format;
@@ -323,8 +694,12 @@
 	crtc_split_width = get_crtc_split_width(crtc);
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		state = plane->state;
+		if (!state)
+			continue;
 
-		pstate = to_sde_plane_state(plane->state);
+		pstate = to_sde_plane_state(state);
+		fb = state->fb;
 
 		if (sde_plane_is_sbuf_mode(plane, &prefill))
 			sbuf_mode = true;
@@ -332,7 +707,7 @@
 		sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
 
 		/* always stage plane on either left or right lm */
-		if (plane->state->crtc_x >= crtc_split_width) {
+		if (state->crtc_x >= crtc_split_width) {
 			lm_idx = RIGHT_MIXER;
 			idx = right_crtc_zpos_cnt[pstate->stage]++;
 		} else {
@@ -342,8 +717,7 @@
 
 		/* stage plane on right LM if it crosses the boundary */
 		lm_right = (lm_idx == LEFT_MIXER) &&
-		   (plane->state->crtc_x + plane->state->crtc_w >
-							crtc_split_width);
+		   (state->crtc_x + state->crtc_w > crtc_split_width);
 
 		stage_cfg->stage[lm_idx][pstate->stage][idx] =
 							sde_plane_pipe(plane);
@@ -357,11 +731,18 @@
 				pstate->stage,
 				plane->base.id,
 				sde_plane_pipe(plane) - SSPP_VIG0,
-				plane->state->fb ?
-				plane->state->fb->base.id : -1);
+				state->fb ? state->fb->base.id : -1);
 
 		format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
 
+		SDE_EVT32(DRMID(plane), state->src_x, state->src_y,
+			state->src_w >> 16, state->src_h >> 16, state->crtc_x,
+			state->crtc_y, state->crtc_w, state->crtc_h);
+		SDE_EVT32(DRMID(plane), DRMID(crtc), lm_idx, lm_right,
+			pstate->stage, pstate->multirect_index,
+			pstate->multirect_mode, format->base.pixel_format,
+			fb ? fb->modifier[0] : 0);
+
 		/* blend config update */
 		if (pstate->stage != SDE_STAGE_BASE) {
 			_sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
@@ -504,7 +885,7 @@
 
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	SDE_EVT32(DRMID(crtc));
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 
 	/* identify connectors attached to this crtc */
 	cstate->num_connectors = 0;
@@ -586,7 +967,7 @@
 	_sde_crtc_complete_flip(crtc, NULL);
 	drm_crtc_handle_vblank(crtc);
 	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
-	SDE_EVT32_IRQ(DRMID(crtc));
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 }
 
 static void sde_crtc_frame_event_work(struct kthread_work *work)
@@ -632,7 +1013,8 @@
 					crtc->base.id,
 					ktime_to_ns(fevent->ts),
 					atomic_read(&sde_crtc->frame_pending));
-			SDE_EVT32(DRMID(crtc), fevent->event, 0);
+			SDE_EVT32(DRMID(crtc), fevent->event,
+							SDE_EVTLOG_FUNC_CASE1);
 
 			/* don't propagate unexpected frame done events */
 			return;
@@ -641,16 +1023,18 @@
 			SDE_DEBUG("crtc%d ts:%lld last pending\n",
 					crtc->base.id,
 					ktime_to_ns(fevent->ts));
-			SDE_EVT32(DRMID(crtc), fevent->event, 1);
+			SDE_EVT32(DRMID(crtc), fevent->event,
+							SDE_EVTLOG_FUNC_CASE2);
 			sde_core_perf_crtc_release_bw(crtc);
 		} else {
-			SDE_EVT32(DRMID(crtc), fevent->event, 2);
+			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
+							SDE_EVTLOG_FUNC_CASE3);
 		}
 	} else {
 		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
 				ktime_to_ns(fevent->ts),
 				fevent->event);
-		SDE_EVT32(DRMID(crtc), fevent->event, 3);
+		SDE_EVT32(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE4);
 	}
 
 	if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
@@ -680,8 +1064,7 @@
 	pipe_id = drm_crtc_index(crtc);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
-
-	SDE_EVT32(DRMID(crtc), event);
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
@@ -717,7 +1100,7 @@
 
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	SDE_EVT32(DRMID(crtc));
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 
 	/* signal output fence(s) at end of commit */
 	sde_fence_signal(&sde_crtc->output_fence, 0);
@@ -1071,6 +1454,8 @@
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
 
+	_sde_crtc_rp_destroy(&cstate->rp);
+
 	__drm_atomic_helper_crtc_destroy_state(state);
 
 	/* destroy value helper */
@@ -1262,6 +1647,8 @@
 	/* duplicate base helper */
 	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
 
+	_sde_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
 	return &cstate->base;
 }
 
@@ -1304,6 +1691,8 @@
 
 	_sde_crtc_set_input_fence_timeout(cstate);
 
+	_sde_crtc_rp_reset(&cstate->rp);
+
 	cstate->base.crtc = crtc;
 	crtc->state = &cstate->base;
 }
@@ -1335,7 +1724,8 @@
 	if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
 		SDE_ERROR("crtc%d invalid vblank refcount\n",
 				crtc->base.id);
-		SDE_EVT32(DRMID(crtc));
+		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount),
+							SDE_EVTLOG_FUNC_CASE1);
 		drm_for_each_encoder(encoder, crtc->dev) {
 			if (encoder->crtc != crtc)
 				continue;
@@ -1349,7 +1739,8 @@
 		/* release bandwidth and other resources */
 		SDE_ERROR("crtc%d invalid frame pending\n",
 				crtc->base.id);
-		SDE_EVT32(DRMID(crtc));
+		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
+							SDE_EVTLOG_FUNC_CASE2);
 		sde_core_perf_crtc_release_bw(crtc);
 		atomic_set(&sde_crtc->frame_pending, 0);
 	}
@@ -1487,14 +1878,15 @@
 		return -EINVAL;
 	}
 
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
 	if (!state->enable || !state->active) {
 		SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
 				crtc->base.id, state->enable, state->active);
-		return 0;
+		goto end;
 	}
 
-	sde_crtc = to_sde_crtc(crtc);
-	cstate = to_sde_crtc_state(state);
 	mode = &state->adjusted_mode;
 	SDE_DEBUG("%s: check", sde_crtc->name);
 
@@ -1702,6 +2094,7 @@
 
 
 end:
+	_sde_crtc_rp_free_unused(&cstate->rp);
 	return rc;
 }
 
@@ -1907,6 +2300,9 @@
 		}
 		if (ret)
 			DRM_ERROR("failed to set the property\n");
+
+		SDE_DEBUG("crtc%d %s[%d] <= 0x%llx ret=%d\n", crtc->base.id,
+				property->name, property->base.id, val, ret);
 	}
 
 	return ret;
@@ -2209,6 +2605,7 @@
 {
 	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
 	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
+	struct sde_crtc_res *res;
 
 	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
 	seq_printf(s, "client type: %d\n", sde_crtc_get_client_type(crtc));
@@ -2218,6 +2615,13 @@
 	seq_printf(s, "max_per_pipe_ib: %llu\n",
 			cstate->cur_perf.max_per_pipe_ib);
 
+	seq_printf(s, "rp.%d: ", cstate->rp.sequence_id);
+	list_for_each_entry(res, &cstate->rp.res_list, list)
+		seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+				res->type, res->tag, res->val,
+				atomic_read(&res->refcount));
+	seq_puts(s, "\n");
+
 	return 0;
 }
 DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
index ad077b5..19ae27f 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.h
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -25,6 +25,7 @@
 #include "sde_fence.h"
 #include "sde_kms.h"
 #include "sde_core_perf.h"
+#include "sde_hw_blk.h"
 
 #define SDE_CRTC_NAME_SIZE	12
 
@@ -191,6 +192,56 @@
 #define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
 
 /**
+ * struct sde_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct sde_crtc_res_ops {
+	void *(*get)(void *val, u32 type, u64 tag);
+	void (*put)(void *val);
+};
+
+/* crtc resource type (0x0-0xffff reserved for hw block type) */
+#define SDE_CRTC_RES_ROT_OUT_FBO	0x10000
+#define SDE_CRTC_RES_ROT_OUT_FB		0x10001
+#define SDE_CRTC_RES_ROT_PLANE		0x10002
+#define SDE_CRTC_RES_ROT_IN_FB		0x10003
+
+#define SDE_CRTC_RES_FLAG_FREE		BIT(0)
+
+/**
+ * struct sde_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct sde_crtc_res {
+	struct list_head list;
+	u32 type;
+	u64 tag;
+	atomic_t refcount;
+	struct sde_crtc_res_ops ops;
+	void *val;
+	u32 flags;
+};
+
+/**
+ * struct sde_crtc_respool - crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resources managed by this resource pool
+ * @ops: resource operations for the parent resource pool
+ */
+struct sde_crtc_respool {
+	u32 sequence_id;
+	struct list_head res_list;
+	struct sde_crtc_res_ops ops;
+};
+
+/**
  * struct sde_crtc_state - sde container for atomic crtc state
  * @base: Base drm crtc state structure
  * @connectors    : Currently associated drm connectors
@@ -226,6 +277,8 @@
 	struct sde_core_perf_params new_perf;
 	struct sde_ctl_sbuf_cfg sbuf_cfg;
 	u64 sbuf_prefill_line;
+
+	struct sde_crtc_respool rp;
 };
 
 #define to_sde_crtc_state(x) \
@@ -370,4 +423,34 @@
 int sde_crtc_event_queue(struct drm_crtc *crtc,
 		void (*func)(struct drm_crtc *crtc, void *usr), void *usr);
 
+/**
+ * sde_crtc_res_add - add given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * @val: Resource handle
+ * @ops: Resource callback operations
+ * return: 0 if success; error code otherwise
+ */
+int sde_crtc_res_add(struct drm_crtc_state *state, u32 type, u64 tag,
+		void *val, struct sde_crtc_res_ops *ops);
+
+/**
+ * sde_crtc_res_get - get given resource from resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle on success; error pointer or NULL otherwise
+ */
+void *sde_crtc_res_get(struct drm_crtc_state *state, u32 type, u64 tag);
+
+/**
+ * sde_crtc_res_put - return given resource to resource pool in crtc state
+ * @state: Pointer to drm crtc state
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: None
+ */
+void sde_crtc_res_put(struct drm_crtc_state *state, u32 type, u64 tag);
+
 #endif /* _SDE_CRTC_H_ */
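
For illustration, a minimal sketch of how a client might drive this
state-scoped resource pool. sde_crtc_res_add/get/put, struct
sde_crtc_res_ops, and SDE_CRTC_RES_ROT_OUT_FB all come from the header
above; rot_fb_get(), rot_fb_put(), and stage_rot_output() are
hypothetical stand-ins, not part of this patch:

	static void *rot_fb_get(void *val, u32 type, u64 tag)
	{
		/* hypothetical: take a reference on the framebuffer;
		 * called with an existing val when a pool is duplicated
		 */
		return val;
	}

	static void rot_fb_put(void *val)
	{
		/* hypothetical: drop the framebuffer reference */
	}

	static struct sde_crtc_res_ops rot_fb_ops = {
		.get = rot_fb_get,
		.put = rot_fb_put,
	};

	static int stage_rot_output(struct drm_crtc_state *state,
			struct drm_framebuffer *fb, u64 tag)
	{
		int rc;

		/* register the handle with this crtc state's pool */
		rc = sde_crtc_res_add(state, SDE_CRTC_RES_ROT_OUT_FB, tag,
				fb, &rot_fb_ops);
		if (rc)
			return rc;

		/* later lookups by type/tag take another reference */
		if (!sde_crtc_res_get(state, SDE_CRTC_RES_ROT_OUT_FB, tag))
			return -ENOMEM;

		/* each user returns the resource; at refcount zero the
		 * entry is only marked FREE, so a later get of the same
		 * type may retag and reuse it
		 */
		sde_crtc_res_put(state, SDE_CRTC_RES_ROT_OUT_FB, tag);
		return 0;
	}

A get that misses both the tag match and a FREE entry of the same type
falls back to the pool's own ops.get (the crtc hw-block lookup installed
by _sde_crtc_rp_reset).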
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 7137aaa..20d5e52 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1013,7 +1013,7 @@
 		if (sde_enc->phys_encs[i] == ready_phys) {
 			clear_bit(i, sde_enc->frame_busy_mask);
 			sde_enc->crtc_frame_event |= event;
-			SDE_EVT32(DRMID(drm_enc), i,
+			SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
 					sde_enc->frame_busy_mask[0]);
 		}
 
@@ -1053,14 +1053,18 @@
 	}
 
 	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
-	SDE_EVT32(DRMID(&to_sde_encoder_virt(drm_enc)->base),
-			phys->intf_idx, pending_kickoff_cnt);
 
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
 		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
 
 	ctl->ops.trigger_flush(ctl);
-	SDE_EVT32(DRMID(drm_enc), ctl->idx);
+
+	if (ctl->ops.get_pending_flush)
+		SDE_EVT32(DRMID(drm_enc), phys->intf_idx, pending_kickoff_cnt,
+			ctl->idx, ctl->ops.get_pending_flush(ctl));
+	else
+		SDE_EVT32(DRMID(drm_enc), phys->intf_idx, ctl->idx,
+						pending_kickoff_cnt);
 }
 
 /**
@@ -1081,7 +1085,6 @@
 void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_hw_ctl *ctl;
-	int ctl_idx = -1;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -1091,11 +1094,8 @@
 	ctl = phys_enc->hw_ctl;
 	if (ctl && ctl->ops.trigger_start) {
 		ctl->ops.trigger_start(ctl);
-		ctl_idx = ctl->idx;
+		SDE_EVT32(DRMID(phys_enc->parent), ctl->idx);
 	}
-
-	if (phys_enc && phys_enc->parent)
-		SDE_EVT32(DRMID(phys_enc->parent), ctl_idx);
 }
 
 int sde_encoder_helper_wait_event_timeout(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 86e292f..a68da4e 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -167,6 +167,10 @@
 		do_log = true;
 	}
 
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->pp_timeout_report_cnt,
+			atomic_read(&phys_enc->pending_kickoff_cnt));
+
 	/* to avoid flooding, only log first time, and "dead" time */
 	if (do_log) {
 		SDE_ERROR_CMDENC(cmd_enc,
@@ -176,10 +180,7 @@
 				cmd_enc->pp_timeout_report_cnt,
 				atomic_read(&phys_enc->pending_kickoff_cnt));
 
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				0xbad, cmd_enc->pp_timeout_report_cnt,
-				atomic_read(&phys_enc->pending_kickoff_cnt));
+		SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
 
 		SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
 				"dsi1_phy", "vbif", "dbg_bus",
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 01d0d20..e7f3df7 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -636,8 +636,8 @@
 
 	color = _sde_format_get_media_color_ubwc(fmt);
 	if (color < 0) {
-		DRM_ERROR("UBWC format not supported for fmt:0x%X\n",
-			fmt->base.pixel_format);
+		DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+			(char *)&fmt->base.pixel_format);
 		return -EINVAL;
 	}
 
@@ -1123,21 +1123,23 @@
 	case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
 		map = sde_format_map_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_ubwc);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
-				format);
+		SDE_DEBUG("found fmt: %4.4s  DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+				(char *)&format);
 		break;
 	case DRM_FORMAT_MOD_QCOM_DX:
 		map = sde_format_map_p010;
 		map_size = ARRAY_SIZE(sde_format_map_p010);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX\n", format);
+		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_DX\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
 			DRM_FORMAT_MOD_QCOM_TILE):
 		map = sde_format_map_p010_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_p010_ubwc);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
-				format);
+		SDE_DEBUG(
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
 		DRM_FORMAT_MOD_QCOM_TIGHT):
@@ -1146,26 +1148,28 @@
 		map = sde_format_map_tp10_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc);
 		SDE_DEBUG(
-			"found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
-			format);
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
+				(char *)&format);
 		break;
 	case DRM_FORMAT_MOD_QCOM_TILE:
 		map = sde_format_map_tile;
 		map_size = ARRAY_SIZE(sde_format_map_tile);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE\n", format);
+		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
 		map = sde_format_map_p010_tile;
 		map_size = ARRAY_SIZE(sde_format_map_p010_tile);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX\n",
-				format);
+		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
 			DRM_FORMAT_MOD_QCOM_TIGHT):
 		map = sde_format_map_tp10_tile;
 		map_size = ARRAY_SIZE(sde_format_map_tp10_tile);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
-				format);
+		SDE_DEBUG(
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
+				(char *)&format);
 		break;
 	default:
 		SDE_ERROR("unsupported format modifier %llX\n", mod0);
@@ -1180,11 +1184,11 @@
 	}
 
 	if (fmt == NULL)
-		SDE_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
-				format, mod0);
+		SDE_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+				(char *)&format, mod0);
 	else
-		SDE_DEBUG("fmt %s mod 0x%llX ubwc %d yuv %d\n",
-				drm_get_format_name(format), mod0,
+		SDE_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+				(char *)&format, mod0,
 				SDE_FORMAT_IS_UBWC(fmt),
 				SDE_FORMAT_IS_YUV(fmt));
 
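
The %4.4s conversions above work because DRM pixel formats are fourcc
codes: the four ASCII characters of the format name are packed into the
32-bit value, so printing four bytes straight out of the variable yields
a readable name and no NUL terminator is needed. A standalone sketch of
the same trick, assuming a little-endian CPU as on the targets this
driver supports:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* fourcc for "AB30" (DRM_FORMAT_ABGR2101010), packed the
		 * same way as the fourcc_code() macro in drm_fourcc.h
		 */
		uint32_t fmt = (uint32_t)'A' | ((uint32_t)'B' << 8) |
			       ((uint32_t)'3' << 16) | ((uint32_t)'0' << 24);

		/* low byte sits first in memory, so this prints "AB30" */
		printf("fmt: %4.4s\n", (char *)&fmt);
		return 0;
	}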
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.c b/drivers/gpu/drm/msm/sde/sde_hw_blk.c
index 5ac017c..f59864d 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_blk.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_blk.c
@@ -29,9 +29,11 @@
  * sde_hw_blk_init - initialize hw block object
  * @type: hw block type - enum sde_hw_blk_type
  * @id: instance id of the hw block
+ * @ops: Pointer to block operations
  * return: 0 if success; error code otherwise
  */
-int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id)
+int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
+		struct sde_hw_blk_ops *ops)
 {
 	if (!hw_blk) {
 		pr_err("invalid parameters\n");
@@ -42,7 +44,9 @@
 	hw_blk->type = type;
 	hw_blk->id = id;
 	atomic_set(&hw_blk->refcount, 0);
-	INIT_LIST_HEAD(&hw_blk->attach_list);
+
+	if (ops)
+		hw_blk->ops = *ops;
 
 	mutex_lock(&sde_hw_blk_lock);
 	list_add(&hw_blk->list, &sde_hw_blk_list);
@@ -58,8 +62,6 @@
  */
 void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk)
 {
-	struct sde_hw_blk_attachment *curr, *next;
-
 	if (!hw_blk) {
 		pr_err("invalid parameters\n");
 		return;
@@ -69,14 +71,6 @@
 		pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
 				hw_blk->id);
 
-	list_for_each_entry_safe(curr, next, &hw_blk->attach_list, list) {
-		pr_err("hw_blk:%d.%d tag:0x%x/0x%llx still attached\n",
-				hw_blk->type, hw_blk->id,
-				curr->tag, (u64) curr->value);
-		list_del_init(&curr->list);
-		kfree(curr);
-	}
-
 	mutex_lock(&sde_hw_blk_lock);
 	list_del(&hw_blk->list);
 	mutex_unlock(&sde_hw_blk_lock);
@@ -92,6 +86,7 @@
 struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id)
 {
 	struct sde_hw_blk *curr;
+	int rc, refcount;
 
 	if (!hw_blk) {
 		mutex_lock(&sde_hw_blk_lock);
@@ -108,16 +103,28 @@
 		mutex_unlock(&sde_hw_blk_lock);
 	}
 
-	if (hw_blk) {
-		int refcount = atomic_inc_return(&hw_blk->refcount);
-
-		pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
-				hw_blk->id, refcount);
-	} else {
-		pr_err("no hw_blk:%d\n", type);
+	if (!hw_blk) {
+		pr_debug("no hw_blk:%d\n", type);
+		return NULL;
 	}
 
+	refcount = atomic_inc_return(&hw_blk->refcount);
+
+	if (refcount == 1 && hw_blk->ops.start) {
+		rc = hw_blk->ops.start(hw_blk);
+		if (rc) {
+			pr_err("failed to start  hw_blk:%d rc:%d\n", type, rc);
+			goto error_start;
+		}
+	}
+
+	pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+			hw_blk->id, refcount);
 	return hw_blk;
+
+error_start:
+	sde_hw_blk_put(hw_blk);
+	return ERR_PTR(rc);
 }
 
 /**
@@ -125,11 +132,8 @@
  * @hw_blk: hw block to be freed
  * @free_blk: function to be called when reference count goes to zero
  */
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk,
-		void (*free_blk)(struct sde_hw_blk *))
+void sde_hw_blk_put(struct sde_hw_blk *hw_blk)
 {
-	struct sde_hw_blk_attachment *curr, *next;
-
 	if (!hw_blk) {
 		pr_err("invalid parameters\n");
 		return;
@@ -146,122 +150,6 @@
 	if (atomic_dec_return(&hw_blk->refcount))
 		return;
 
-	if (free_blk)
-		free_blk(hw_blk);
-
-	/* report any residual attachments */
-	list_for_each_entry_safe(curr, next, &hw_blk->attach_list, list) {
-		pr_err("hw_blk:%d.%d tag:0x%x/0x%llx still attached\n",
-				hw_blk->type, hw_blk->id,
-				curr->tag, (u64) curr->value);
-		list_del_init(&curr->list);
-		kfree(curr);
-	}
-}
-
-/**
- * sde_hw_blk_lookup_blk - lookup hardware block that matches tag/value/type
- *	tuple and increment reference count
- * @tag: search tag
- * @value: value associated with search tag
- * @type: hardware block type
- * return: Pointer to hardware block
- */
-struct sde_hw_blk *sde_hw_blk_lookup_blk(u32 tag, void *value, u32 type)
-{
-	struct sde_hw_blk *hw_blk = NULL, *curr;
-	struct sde_hw_blk_attachment *attach;
-
-	pr_debug("hw_blk:%d tag:0x%x/0x%llx\n", type, tag, (u64) value);
-
-	mutex_lock(&sde_hw_blk_lock);
-	list_for_each_entry(curr, &sde_hw_blk_list, list) {
-		if ((curr->type != type) || !atomic_read(&curr->refcount))
-			continue;
-
-		list_for_each_entry(attach, &curr->attach_list, list) {
-			if ((attach->tag != tag) || (attach->value != value))
-				continue;
-
-			hw_blk = curr;
-			break;
-		}
-
-		if (hw_blk)
-			break;
-	}
-	mutex_unlock(&sde_hw_blk_lock);
-
-	if (hw_blk)
-		sde_hw_blk_get(hw_blk, 0, -1);
-
-	return hw_blk;
-}
-
-/**
- * sde_hw_blk_attach - attach given tag/value pair to hardware block
- *	and increment reference count
- * @hw_blk: Pointer hardware block
- * @tag: search tag
- * @value: value associated with search tag
- * return: 0 if success; error code otherwise
- */
-int sde_hw_blk_attach(struct sde_hw_blk *hw_blk, u32 tag, void *value)
-{
-	struct sde_hw_blk_attachment *attach;
-
-	if (!hw_blk) {
-		pr_err("invalid parameters\n");
-		return -EINVAL;
-	}
-
-	pr_debug("hw_blk:%d.%d tag:0x%x/0x%llx\n", hw_blk->type, hw_blk->id,
-			tag, (u64) value);
-
-	attach = kzalloc(sizeof(struct sde_hw_blk_attachment), GFP_KERNEL);
-	if (!attach)
-		return -ENOMEM;
-
-	INIT_LIST_HEAD(&attach->list);
-	attach->tag = tag;
-	attach->value = value;
-	/* always add to the front so latest shows up first in search */
-	list_add(&attach->list, &hw_blk->attach_list);
-	sde_hw_blk_get(hw_blk, 0, -1);
-
-	return 0;
-}
-
-/**
- * sde_hw_blk_detach - detach given tag/value pair from hardware block
- *	and decrement reference count
- * @hw_blk: Pointer hardware block
- * @tag: search tag
- * @value: value associated with search tag
- * return: none
- */
-void sde_hw_blk_detach(struct sde_hw_blk *hw_blk, u32 tag, void *value)
-{
-	struct sde_hw_blk_attachment *curr, *next;
-
-	if (!hw_blk) {
-		pr_err("invalid parameters\n");
-		return;
-	}
-
-	pr_debug("hw_blk:%d.%d tag:0x%x/0x%llx\n", hw_blk->type, hw_blk->id,
-			tag, (u64) value);
-
-	list_for_each_entry_safe(curr, next, &hw_blk->attach_list, list) {
-		if ((curr->tag != tag) || (curr->value != value))
-			continue;
-
-		list_del_init(&curr->list);
-		kfree(curr);
-		sde_hw_blk_put(hw_blk, NULL);
-		return;
-	}
-
-	pr_err("hw_blk:%d.%d tag:0x%x/0x%llx not found\n", hw_blk->type,
-			hw_blk->id, tag, (u64) value);
+	if (hw_blk->ops.stop)
+		hw_blk->ops.stop(hw_blk);
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_blk.h b/drivers/gpu/drm/msm/sde/sde_hw_blk.h
index ea4ba08..d979091 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_blk.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_blk.h
@@ -17,16 +17,16 @@
 #include <linux/list.h>
 #include <linux/atomic.h>
 
+struct sde_hw_blk;
+
 /**
- * struct sde_hw_blk_attachment - hardware block attachment
- * @list: list of attachment
- * @tag: search tag
- * @value: value associated with the given tag
+ * struct sde_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
  */
-struct sde_hw_blk_attachment {
-	struct list_head list;
-	u32 tag;
-	void *value;
+struct sde_hw_blk_ops {
+	int (*start)(struct sde_hw_blk *);
+	void (*stop)(struct sde_hw_blk *);
 };
 
 /**
@@ -35,53 +35,19 @@
  * @type: hardware block type
  * @id: instance id
  * @refcount: reference/usage count
- * @attachment_list: list of attachment
  */
 struct sde_hw_blk {
 	struct list_head list;
 	u32 type;
 	int id;
 	atomic_t refcount;
-	struct list_head attach_list;
+	struct sde_hw_blk_ops ops;
 };
 
-int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id);
+int sde_hw_blk_init(struct sde_hw_blk *hw_blk, u32 type, int id,
+		struct sde_hw_blk_ops *ops);
 void sde_hw_blk_destroy(struct sde_hw_blk *hw_blk);
 
 struct sde_hw_blk *sde_hw_blk_get(struct sde_hw_blk *hw_blk, u32 type, int id);
-void sde_hw_blk_put(struct sde_hw_blk *hw_blk,
-		void (*blk_free)(struct sde_hw_blk *));
-
-struct sde_hw_blk *sde_hw_blk_lookup_blk(u32 tag, void *value, u32 type);
-int sde_hw_blk_attach(struct sde_hw_blk *hw_blk, u32 tag, void *value);
-void sde_hw_blk_detach(struct sde_hw_blk *hw_blk, u32 tag, void *value);
-
-/**
- * sde_hw_blk_lookup_value - return value associated with the given tag
- * @hw_blk: Pointer to hardware block
- * @tag: tag to find
- * @idx: index if more than one value found, with 0 being first
- * return: value associated with the given tag
- */
-static inline void *sde_hw_blk_lookup_value(struct sde_hw_blk *hw_blk,
-		u32 tag, u32 idx)
-{
-	struct sde_hw_blk_attachment *attach;
-
-	if (!hw_blk)
-		return NULL;
-
-	list_for_each_entry(attach, &hw_blk->attach_list, list) {
-		if (attach->tag != tag)
-			continue;
-
-		if (idx == 0)
-			return attach->value;
-
-		idx--;
-	}
-
-	return NULL;
-}
-
+void sde_hw_blk_put(struct sde_hw_blk *hw_blk);
 #endif /*_SDE_HW_BLK_H */
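
The new ops replace the attach/detach machinery with reference-counted
lifecycle hooks: per sde_hw_blk.c above, sde_hw_blk_get() invokes
ops.start when the refcount goes from zero to one, and sde_hw_blk_put()
invokes ops.stop when it returns to zero. A hedged sketch of a client;
struct my_block, my_blk_start(), my_blk_stop(), and my_blk_demo() are
hypothetical, while SDE_HW_BLK_ROT is the real block type used by the
rotator below:

	struct my_block {
		struct sde_hw_blk base;
		/* ... block-specific state ... */
	};

	static int my_blk_start(struct sde_hw_blk *hw_blk)
	{
		/* hypothetical: enable clocks/power for the block */
		return 0;
	}

	static void my_blk_stop(struct sde_hw_blk *hw_blk)
	{
		/* hypothetical: disable clocks/power for the block */
	}

	static struct sde_hw_blk_ops my_ops = {
		.start = my_blk_start,
		.stop = my_blk_stop,
	};

	static void my_blk_demo(struct my_block *blk)
	{
		struct sde_hw_blk *hw;

		/* register once, typically from the block's init path */
		sde_hw_blk_init(&blk->base, SDE_HW_BLK_ROT, 0, &my_ops);

		/* first get: refcount 0 -> 1 invokes ops.start(); on a
		 * start failure an ERR_PTR comes back, so callers should
		 * test with IS_ERR_OR_NULL()
		 */
		hw = sde_hw_blk_get(&blk->base, SDE_HW_BLK_ROT, -1);
		if (IS_ERR_OR_NULL(hw))
			return;

		/* last put: refcount 1 -> 0 invokes ops.stop() */
		sde_hw_blk_put(hw);
	}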
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 9285487..11cca1f 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -752,7 +752,8 @@
 	sblk->maxupscale = MAX_SSPP_UPSCALE;
 	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
 	sspp->id = SSPP_VIG0 + *vig_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
 	sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
 	sspp->type = SSPP_TYPE_VIG;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
@@ -769,7 +770,7 @@
 		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
 			VIG_QSEED_LEN, 0);
 		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-				"sspp_scaler%u", sspp->id);
+				"sspp_scaler%u", sspp->id - SSPP_VIG0);
 	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
 		set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
 		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
@@ -778,7 +779,7 @@
 		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
 			VIG_QSEED_LEN, 0);
 		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_scaler%u", sspp->id);
+			"sspp_scaler%u", sspp->id - SSPP_VIG0);
 	}
 
 	if (sde_cfg->has_sbuf)
@@ -786,7 +787,7 @@
 
 	sblk->csc_blk.id = SDE_SSPP_CSC;
 	snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_csc%u", sspp->id);
+			"sspp_csc%u", sspp->id - SSPP_VIG0);
 	if (sde_cfg->csc_type == SDE_SSPP_CSC) {
 		set_bit(SDE_SSPP_CSC, &sspp->features);
 		sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
@@ -799,7 +800,7 @@
 
 	sblk->hsic_blk.id = SDE_SSPP_HSIC;
 	snprintf(sblk->hsic_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_hsic%u", sspp->id);
+			"sspp_hsic%u", sspp->id - SSPP_VIG0);
 	if (prop_exists[VIG_HSIC_PROP]) {
 		sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value,
 			VIG_HSIC_PROP, 0);
@@ -811,7 +812,7 @@
 
 	sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR;
 	snprintf(sblk->memcolor_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_memcolor%u", sspp->id);
+			"sspp_memcolor%u", sspp->id - SSPP_VIG0);
 	if (prop_exists[VIG_MEMCOLOR_PROP]) {
 		sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value,
 			VIG_MEMCOLOR_PROP, 0);
@@ -823,7 +824,7 @@
 
 	sblk->pcc_blk.id = SDE_SSPP_PCC;
 	snprintf(sblk->pcc_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_pcc%u", sspp->id);
+			"sspp_pcc%u", sspp->id - SSPP_VIG0);
 	if (prop_exists[VIG_PCC_PROP]) {
 		sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
 			VIG_PCC_PROP, 0);
@@ -843,7 +844,8 @@
 	sblk->maxupscale = MAX_SSPP_UPSCALE;
 	sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
 	sspp->id = SSPP_RGB0 + *rgb_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
 	sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
 	sspp->type = SSPP_TYPE_RGB;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
@@ -860,7 +862,7 @@
 		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
 			RGB_SCALER_LEN, 0);
 		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_scaler%u", sspp->id);
+			"sspp_scaler%u", sspp->id - SSPP_VIG0);
 	} else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
 		set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
 		sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
@@ -869,7 +871,7 @@
 		sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
 			SSPP_SCALE_SIZE, 0);
 		snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
-			"sspp_scaler%u", sspp->id);
+			"sspp_scaler%u", sspp->id - SSPP_VIG0);
 	}
 
 	sblk->pcc_blk.id = SDE_SSPP_PCC;
@@ -897,7 +899,8 @@
 	sblk->maxdwnscale = SSPP_UNITY_SCALE;
 	sblk->format_list = sde_cfg->cursor_formats;
 	sspp->id = SSPP_CURSOR0 + *cursor_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
 	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
 	sspp->type = SSPP_TYPE_CURSOR;
 	(*cursor_count)++;
@@ -912,7 +915,8 @@
 	sblk->format_list = sde_cfg->dma_formats;
 	sspp->id = SSPP_DMA0 + *dma_count;
 	sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
-	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u", sspp->id);
+	snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+			sspp->id - SSPP_VIG0);
 	sspp->type = SSPP_TYPE_DMA;
 	set_bit(SDE_SSPP_QOS, &sspp->features);
 	(*dma_count)++;
@@ -1023,8 +1027,6 @@
 			set_bit(sde_cfg->smart_dma_rev, &sspp->features);
 
 		sblk->src_blk.id = SDE_SSPP_SRC;
-		snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
-				sblk->src_blk.id);
 
 		of_property_read_string_index(np,
 				sspp_prop[SSPP_TYPE].prop_name, i, &type);
@@ -1048,6 +1050,9 @@
 			goto end;
 		}
 
+		snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
+				sspp->id - SSPP_VIG0);
+
 		sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
 		sblk->maxvdeciexp = MAX_VERT_DECIMATION;
 
@@ -1142,7 +1147,8 @@
 		ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
 		ctl->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
 		ctl->id = CTL_0 + i;
-		snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u", ctl->id);
+		snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u",
+				ctl->id - CTL_0);
 
 		if (i < MAX_SPLIT_DISPLAY_CTL)
 			set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
@@ -1255,7 +1261,8 @@
 		mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i);
 		mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0);
 		mixer->id = LM_0 + i;
-		snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u", mixer->id);
+		snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u",
+				mixer->id - LM_0);
 
 		if (!prop_exists[MIXER_LEN])
 			mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
@@ -1351,7 +1358,8 @@
 		intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, i);
 		intf->len = PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0);
 		intf->id = INTF_0 + i;
-		snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u", intf->id);
+		snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u",
+				intf->id - INTF_0);
 
 		if (!prop_exists[INTF_LEN])
 			intf->len = DEFAULT_SDE_HW_BLOCK_LEN;
@@ -1434,7 +1442,8 @@
 
 		wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i);
 		wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i);
-		snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u", wb->id);
+		snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u",
+				wb->id - WB_0);
 		wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
 			PROP_VALUE_ACCESS(prop_value, WB_ID, i);
 		wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
@@ -1733,7 +1742,8 @@
 		dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i);
 		dspp->len = PROP_VALUE_ACCESS(prop_value, DSPP_SIZE, 0);
 		dspp->id = DSPP_0 + i;
-		snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u", dspp->id);
+		snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u",
+				dspp->id - DSPP_0);
 
 		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
 		if (!sblk) {
@@ -1805,6 +1815,9 @@
 		dsc->base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
 		dsc->id = DSC_0 + i;
 		dsc->len = PROP_VALUE_ACCESS(prop_value, DSC_LEN, 0);
+		snprintf(dsc->name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
+				dsc->id - DSC_0);
+
 		if (!prop_exists[DSC_LEN])
 			dsc->len = DEFAULT_SDE_HW_BLOCK_LEN;
 	}
@@ -1852,7 +1865,8 @@
 		cdm = sde_cfg->cdm + i;
 		cdm->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
 		cdm->id = CDM_0 + i;
-		snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u", cdm->id);
+		snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u",
+				cdm->id - CDM_0);
 		cdm->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
 
 		/* intf3 and wb2 for cdm block */
@@ -1918,6 +1932,8 @@
 		vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i);
 		vbif->len = vbif_len;
 		vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i);
+		snprintf(vbif->name, SDE_HW_BLK_NAME_LEN, "vbif_%u",
+				vbif->id - VBIF_0);
 
 		SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);
 
@@ -2044,19 +2060,21 @@
 
 		pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i);
 		pp->id = PINGPONG_0 + i;
-		snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u", pp->id);
+		snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u",
+				pp->id - PINGPONG_0);
 		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
 
 		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
 		sblk->te.id = SDE_PINGPONG_TE;
-		snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u", pp->id);
+		snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u",
+				pp->id - PINGPONG_0);
 		set_bit(SDE_PINGPONG_TE, &pp->features);
 
 		sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i);
 		if (sblk->te2.base) {
 			sblk->te2.id = SDE_PINGPONG_TE2;
 			snprintf(sblk->te2.name, SDE_HW_BLK_NAME_LEN, "te2_%u",
-					pp->id);
+					pp->id - PINGPONG_0);
 			set_bit(SDE_PINGPONG_TE2, &pp->features);
 			set_bit(SDE_PINGPONG_SPLIT, &pp->features);
 		}
@@ -2068,7 +2086,7 @@
 		if (sblk->dsc.base) {
 			sblk->dsc.id = SDE_PINGPONG_DSC;
 			snprintf(sblk->dsc.name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
-					pp->id);
+					pp->id - PINGPONG_0);
 			set_bit(SDE_PINGPONG_DSC, &pp->features);
 		}
 	}
@@ -2112,12 +2130,12 @@
 	cfg->mdss[0].base = MDSS_BASE_OFFSET;
 	cfg->mdss[0].id = MDP_TOP;
 	snprintf(cfg->mdss[0].name, SDE_HW_BLK_NAME_LEN, "mdss_%u",
-			cfg->mdss[0].id);
+			cfg->mdss[0].id - MDP_TOP);
 
 	cfg->mdp_count = 1;
 	cfg->mdp[0].id = MDP_TOP;
 	snprintf(cfg->mdp[0].name, SDE_HW_BLK_NAME_LEN, "top_%u",
-		cfg->mdp[0].id);
+		cfg->mdp[0].id - MDP_TOP);
 	cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0);
 	cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0);
 	if (!prop_exists[SDE_LEN])
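
The snprintf changes in this file all fix the same off-by-base bug: hw
block ids are enum values with a non-zero base, so the old code printed
the raw enum value rather than the instance index. A minimal sketch of
the fix; the enum and its base value below are illustrative, not taken
from the catalog:

	#include <stdio.h>

	enum { SSPP_NONE, SSPP_VIG0, SSPP_VIG1 };	/* VIG base is 1 here */

	int main(void)
	{
		char name[12];
		int id = SSPP_VIG0;	/* first VIG pipe */

		/* old code printed "sspp_1"; fixed code prints "sspp_0" */
		snprintf(name, sizeof(name), "sspp_%d", id - SSPP_VIG0);
		printf("%s\n", name);
		return 0;
	}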
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index 18893af..ad2910e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -56,6 +56,19 @@
  */
 static u32 offsite_v_coeff[] = {0x00060002};
 
+/*
+ * Limited-range RGB-to-YUV coefficients with clamp and bias values for
+ * the 10-bit CSC module
+ */
+static struct sde_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
 static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -279,6 +292,11 @@
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+	/*
+	 * Perform any default initialization for the chroma down module:
+	 * set up the default CSC coefficients
+	 */
+	sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
 
 	return c;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
index f546710..62193f9 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dsc.c
@@ -182,6 +182,7 @@
 		if (dsc == m->dsc[i].id) {
 			b->base_off = addr;
 			b->blk_off = m->dsc[i].base;
+			b->length = m->dsc[i].len;
 			b->hwversion = m->hwversion;
 			b->log_mask = SDE_DBG_MASK_DSC;
 			return &m->dsc[i];
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 47fb07f..d5289c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -844,11 +844,21 @@
 
 static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
 {
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+
 	return 0;
 }
 
 static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
 {
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.c b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
index f79dc08..01fe3c8 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.c
@@ -120,37 +120,37 @@
 		return -EINVAL;
 
 	switch (drm_pixfmt) {
-	case DRM_FORMAT_RGB565:
+	case DRM_FORMAT_BGR565:
 		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_RGB_565_UBWC;
 		else
 			*pixfmt = SDE_PIX_FMT_RGB_565;
 		break;
-	case DRM_FORMAT_ARGB8888:
+	case DRM_FORMAT_BGRA8888:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_ARGB_8888_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_ARGB_8888;
 		break;
-	case DRM_FORMAT_XRGB8888:
+	case DRM_FORMAT_BGRX8888:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_XRGB_8888_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_XRGB_8888;
 		break;
-	case DRM_FORMAT_ABGR8888:
+	case DRM_FORMAT_RGBA8888:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_ABGR_8888_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_ABGR_8888;
 		break;
-	case DRM_FORMAT_XBGR8888:
+	case DRM_FORMAT_RGBX8888:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_XBGR_8888_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_XBGR_8888;
 		break;
-	case DRM_FORMAT_RGBA8888:
+	case DRM_FORMAT_ABGR8888:
 		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_RGBA_8888_UBWC;
 		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -158,7 +158,7 @@
 		else
 			*pixfmt = SDE_PIX_FMT_RGBA_8888;
 		break;
-	case DRM_FORMAT_RGBX8888:
+	case DRM_FORMAT_XBGR8888:
 		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_RGBX_8888_UBWC;
 		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -166,13 +166,13 @@
 		else
 			*pixfmt = SDE_PIX_FMT_RGBX_8888;
 		break;
-	case DRM_FORMAT_BGRA8888:
+	case DRM_FORMAT_ARGB8888:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_BGRA_8888_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_BGRA_8888;
 		break;
-	case DRM_FORMAT_BGRX8888:
+	case DRM_FORMAT_XRGB8888:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_BGRX_8888_TILE;
 		else
@@ -220,43 +220,43 @@
 		else
 			*pixfmt = SDE_PIX_FMT_Y_CRCB_H2V2;
 		break;
-	case DRM_FORMAT_ARGB2101010:
+	case DRM_FORMAT_BGRA1010102:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_ARGB_2101010_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_ARGB_2101010;
 		break;
-	case DRM_FORMAT_XRGB2101010:
+	case DRM_FORMAT_BGRX1010102:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_XRGB_2101010_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_XRGB_2101010;
 		break;
-	case DRM_FORMAT_ABGR2101010:
+	case DRM_FORMAT_RGBA1010102:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_ABGR_2101010_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_ABGR_2101010;
 		break;
-	case DRM_FORMAT_XBGR2101010:
+	case DRM_FORMAT_RGBX1010102:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_XBGR_2101010_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_XBGR_2101010;
 		break;
-	case DRM_FORMAT_BGRA1010102:
+	case DRM_FORMAT_ARGB2101010:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_BGRA_1010102_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_BGRA_1010102;
 		break;
-	case DRM_FORMAT_BGRX1010102:
+	case DRM_FORMAT_XRGB2101010:
 		if (SDE_MODIFIER_IS_TILE(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_BGRX_1010102_TILE;
 		else
 			*pixfmt = SDE_PIX_FMT_BGRX_1010102;
 		break;
-	case DRM_FORMAT_RGBA1010102:
+	case DRM_FORMAT_ABGR2101010:
 		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_RGBA_1010102_UBWC;
 		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -264,7 +264,7 @@
 		else
 			*pixfmt = SDE_PIX_FMT_RGBA_1010102;
 		break;
-	case DRM_FORMAT_RGBX1010102:
+	case DRM_FORMAT_XBGR2101010:
 		if (SDE_MODIFIER_IS_UBWC(drm_modifier))
 			*pixfmt = SDE_PIX_FMT_RGBX_1010102_UBWC;
 		else if (SDE_MODIFIER_IS_TILE(drm_modifier))
@@ -298,28 +298,28 @@
 
 	switch (pixfmt) {
 	case SDE_PIX_FMT_RGB_565:
-		*drm_pixfmt = DRM_FORMAT_RGB565;
+		*drm_pixfmt = DRM_FORMAT_BGR565;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_RGB_565_UBWC:
-		*drm_pixfmt = DRM_FORMAT_RGB565;
+		*drm_pixfmt = DRM_FORMAT_BGR565;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
 				DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_RGBA_8888:
-		*drm_pixfmt = DRM_FORMAT_RGBA8888;
+		*drm_pixfmt = DRM_FORMAT_ABGR8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_RGBX_8888:
-		*drm_pixfmt = DRM_FORMAT_RGBX8888;
+		*drm_pixfmt = DRM_FORMAT_XBGR8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_BGRA_8888:
-		*drm_pixfmt = DRM_FORMAT_BGRA8888;
+		*drm_pixfmt = DRM_FORMAT_ARGB8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_BGRX_8888:
-		*drm_pixfmt = DRM_FORMAT_BGRX8888;
+		*drm_pixfmt = DRM_FORMAT_XRGB8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_Y_CBCR_H2V2_UBWC:
@@ -332,12 +332,12 @@
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_RGBA_8888_UBWC:
-		*drm_pixfmt = DRM_FORMAT_RGBA8888;
+		*drm_pixfmt = DRM_FORMAT_ABGR8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
 				DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_RGBX_8888_UBWC:
-		*drm_pixfmt = DRM_FORMAT_RGBX8888;
+		*drm_pixfmt = DRM_FORMAT_XBGR8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
 				DRM_FORMAT_MOD_QCOM_TILE;
 		break;
@@ -346,59 +346,59 @@
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_ARGB_8888:
-		*drm_pixfmt = DRM_FORMAT_ARGB8888;
+		*drm_pixfmt = DRM_FORMAT_BGRA8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_XRGB_8888:
-		*drm_pixfmt = DRM_FORMAT_XRGB8888;
+		*drm_pixfmt = DRM_FORMAT_BGRX8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_ABGR_8888:
-		*drm_pixfmt = DRM_FORMAT_ABGR8888;
+		*drm_pixfmt = DRM_FORMAT_RGBA8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_XBGR_8888:
-		*drm_pixfmt = DRM_FORMAT_XBGR8888;
+		*drm_pixfmt = DRM_FORMAT_RGBX8888;
 		*drm_modifier = 0;
 		break;
 	case SDE_PIX_FMT_ARGB_2101010:
-		*drm_pixfmt = DRM_FORMAT_ARGB2101010;
-		*drm_modifier = 0;
-		break;
-	case SDE_PIX_FMT_XRGB_2101010:
-		*drm_pixfmt = DRM_FORMAT_XRGB2101010;
-		*drm_modifier = 0;
-		break;
-	case SDE_PIX_FMT_ABGR_2101010:
-		*drm_pixfmt = DRM_FORMAT_ABGR2101010;
-		*drm_modifier = 0;
-		break;
-	case SDE_PIX_FMT_XBGR_2101010:
-		*drm_pixfmt = DRM_FORMAT_XBGR2101010;
-		*drm_modifier = 0;
-		break;
-	case SDE_PIX_FMT_BGRA_1010102:
 		*drm_pixfmt = DRM_FORMAT_BGRA1010102;
 		*drm_modifier = 0;
 		break;
-	case SDE_PIX_FMT_BGRX_1010102:
+	case SDE_PIX_FMT_XRGB_2101010:
 		*drm_pixfmt = DRM_FORMAT_BGRX1010102;
 		*drm_modifier = 0;
 		break;
+	case SDE_PIX_FMT_ABGR_2101010:
+		*drm_pixfmt = DRM_FORMAT_RGBA1010102;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_XBGR_2101010:
+		*drm_pixfmt = DRM_FORMAT_RGBX1010102;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_BGRA_1010102:
+		*drm_pixfmt = DRM_FORMAT_ARGB2101010;
+		*drm_modifier = 0;
+		break;
+	case SDE_PIX_FMT_BGRX_1010102:
+		*drm_pixfmt = DRM_FORMAT_XRGB2101010;
+		*drm_modifier = 0;
+		break;
 	case SDE_PIX_FMT_RGBA_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_RGBA8888;
+		*drm_pixfmt = DRM_FORMAT_ABGR8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_RGBX_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_RGBX8888;
+		*drm_pixfmt = DRM_FORMAT_XBGR8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_BGRA_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_BGRA8888;
+		*drm_pixfmt = DRM_FORMAT_ARGB8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_BGRX_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_BGRX8888;
+		*drm_pixfmt = DRM_FORMAT_XRGB8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_Y_CRCB_H2V2_TILE:
@@ -410,45 +410,55 @@
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_ARGB_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_ARGB8888;
+		*drm_pixfmt = DRM_FORMAT_BGRA8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_XRGB_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_XRGB8888;
+		*drm_pixfmt = DRM_FORMAT_BGRX8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_ABGR_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_ABGR8888;
+		*drm_pixfmt = DRM_FORMAT_RGBA8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_XBGR_8888_TILE:
-		*drm_pixfmt = DRM_FORMAT_XBGR8888;
+		*drm_pixfmt = DRM_FORMAT_RGBX8888;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
 	case SDE_PIX_FMT_ARGB_2101010_TILE:
-		*drm_pixfmt = DRM_FORMAT_ARGB2101010;
-		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
-		break;
-	case SDE_PIX_FMT_XRGB_2101010_TILE:
-		*drm_pixfmt = DRM_FORMAT_XRGB2101010;
-		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
-		break;
-	case SDE_PIX_FMT_ABGR_2101010_TILE:
-		*drm_pixfmt = DRM_FORMAT_ABGR2101010;
-		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
-		break;
-	case SDE_PIX_FMT_XBGR_2101010_TILE:
-		*drm_pixfmt = DRM_FORMAT_XBGR2101010;
-		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
-		break;
-	case SDE_PIX_FMT_BGRA_1010102_TILE:
 		*drm_pixfmt = DRM_FORMAT_BGRA1010102;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
-	case SDE_PIX_FMT_BGRX_1010102_TILE:
+	case SDE_PIX_FMT_XRGB_2101010_TILE:
 		*drm_pixfmt = DRM_FORMAT_BGRX1010102;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
 		break;
+	case SDE_PIX_FMT_ABGR_2101010_TILE:
+		*drm_pixfmt = DRM_FORMAT_RGBA1010102;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_XBGR_2101010_TILE:
+		*drm_pixfmt = DRM_FORMAT_RGBX1010102;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_BGRA_1010102_TILE:
+		*drm_pixfmt = DRM_FORMAT_ARGB2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_BGRX_1010102_TILE:
+		*drm_pixfmt = DRM_FORMAT_XRGB2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_RGBA_1010102_UBWC:
+		*drm_pixfmt = DRM_FORMAT_ABGR2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
+	case SDE_PIX_FMT_RGBX_1010102_UBWC:
+		*drm_pixfmt = DRM_FORMAT_XBGR2101010;
+		*drm_modifier = DRM_FORMAT_MOD_QCOM_COMPRESSED |
+				DRM_FORMAT_MOD_QCOM_TILE;
+		break;
 	case SDE_PIX_FMT_Y_CBCR_H2V2_P010:
 		*drm_pixfmt = DRM_FORMAT_NV12;
 		*drm_modifier = DRM_FORMAT_MOD_QCOM_DX;
@@ -779,6 +789,25 @@
 }
 
 /**
+ * sde_hw_rot_get_maxlinewidth - get maximum line width of rotator
+ * @hw: Pointer to rotator hardware driver
+ * return: maximum line width
+ */
+static int sde_hw_rot_get_maxlinewidth(struct sde_hw_rot *hw)
+{
+	struct platform_device *pdev;
+
+	if (!hw || !hw->caps || !hw->caps->pdev) {
+		SDE_ERROR("invalid rotator hw\n");
+		return 0;
+	}
+
+	pdev = hw->caps->pdev;
+
+	return sde_rotator_inline_get_maxlinewidth(pdev);
+}
+
+/**
  * _setup_rot_ops - setup rotator operations
  * @ops: Pointer to operation table
  * @features: available feature bitmask
@@ -790,64 +819,7 @@
 	ops->get_format_caps = sde_hw_rot_get_format_caps;
 	ops->get_downscale_caps = sde_hw_rot_get_downscale_caps;
 	ops->get_cache_size = sde_hw_rot_get_cache_size;
-}
-
-/**
- * sde_hw_rot_init - create/initialize given rotator instance
- * @idx: index of given rotator
- * @addr: i/o address mapping
- * @m: Pointer to mdss catalog
- * return: Pointer to hardware rotator driver of the given instance
- */
-struct sde_hw_rot *sde_hw_rot_init(enum sde_rot idx,
-		void __iomem *addr,
-		struct sde_mdss_cfg *m)
-{
-	struct sde_hw_rot *c;
-	struct sde_rot_cfg *cfg;
-	int rc;
-
-	c = kzalloc(sizeof(*c), GFP_KERNEL);
-	if (!c)
-		return ERR_PTR(-ENOMEM);
-
-	cfg = _rot_offset(idx, m, addr, &c->hw);
-	if (IS_ERR(cfg)) {
-		WARN(1, "Unable to find rot idx=%d\n", idx);
-		kfree(c);
-		return ERR_PTR(-EINVAL);
-	}
-
-	/* Assign ops */
-	c->idx = idx;
-	c->caps = cfg;
-	_setup_rot_ops(&c->ops, c->caps->features);
-
-	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_ROT, idx);
-	if (rc) {
-		SDE_ERROR("failed to init hw blk %d\n", rc);
-		goto blk_init_error;
-	}
-
-	return c;
-
-blk_init_error:
-	kzfree(c);
-
-	return ERR_PTR(rc);
-}
-
-/**
- * sde_hw_rot_destroy - destroy given hardware rotator driver
- * @hw_rot: Pointer to hardware rotator driver
- * return: none
- */
-void sde_hw_rot_destroy(struct sde_hw_rot *hw_rot)
-{
-	sde_hw_blk_destroy(&hw_rot->base);
-	kfree(hw_rot->downscale_caps);
-	kfree(hw_rot->format_caps);
-	kfree(hw_rot);
+	ops->get_maxlinewidth = sde_hw_rot_get_maxlinewidth;
 }
 
 /**
@@ -881,26 +853,81 @@
 	return rc;
 }
 
+static struct sde_hw_blk_ops sde_hw_rot_ops = {
+	.start = sde_hw_rot_blk_start,
+	.stop = sde_hw_rot_blk_stop,
+};
+
+/**
+ * sde_hw_rot_init - create/initialize given rotator instance
+ * @idx: index of given rotator
+ * @addr: i/o address mapping
+ * @m: Pointer to mdss catalog
+ * return: Pointer to hardware rotator driver of the given instance
+ */
+struct sde_hw_rot *sde_hw_rot_init(enum sde_rot idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_rot *c;
+	struct sde_rot_cfg *cfg;
+	int rc;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _rot_offset(idx, m, addr, &c->hw);
+	if (IS_ERR(cfg)) {
+		WARN(1, "Unable to find rot idx=%d\n", idx);
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_rot_ops(&c->ops, c->caps->features);
+
+	rc = sde_hw_blk_init(&c->base, SDE_HW_BLK_ROT, idx,
+			&sde_hw_rot_ops);
+	if (rc) {
+		SDE_ERROR("failed to init hw blk %d\n", rc);
+		goto blk_init_error;
+	}
+
+	return c;
+
+blk_init_error:
+	kzfree(c);
+
+	return ERR_PTR(rc);
+}
+
+/**
+ * sde_hw_rot_destroy - destroy given hardware rotator driver
+ * @hw_rot: Pointer to hardware rotator driver
+ * return: none
+ */
+void sde_hw_rot_destroy(struct sde_hw_rot *hw_rot)
+{
+	sde_hw_blk_destroy(&hw_rot->base);
+	kfree(hw_rot->downscale_caps);
+	kfree(hw_rot->format_caps);
+	kfree(hw_rot);
+}
+
 struct sde_hw_rot *sde_hw_rot_get(struct sde_hw_rot *hw_rot)
 {
 	struct sde_hw_blk *hw_blk = sde_hw_blk_get(hw_rot ? &hw_rot->base :
 			NULL, SDE_HW_BLK_ROT, -1);
-	int rc = 0;
 
-	if (!hw_rot && hw_blk)
-		rc = sde_hw_rot_blk_start(hw_blk);
-
-	if (rc) {
-		sde_hw_blk_put(hw_blk, NULL);
-		return NULL;
-	}
-
-	return hw_blk ? to_sde_hw_rot(hw_blk) : NULL;
+	return IS_ERR_OR_NULL(hw_blk) ? NULL : to_sde_hw_rot(hw_blk);
 }
 
 void sde_hw_rot_put(struct sde_hw_rot *hw_rot)
 {
 	struct sde_hw_blk *hw_blk = hw_rot ? &hw_rot->base : NULL;
 
-	sde_hw_blk_put(hw_blk, sde_hw_rot_blk_stop);
+	sde_hw_blk_put(hw_blk);
 }
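
The pixel-format table swap in this file follows from two naming
conventions that mirror each other: DRM fourccs name channels from most-
to least-significant bit of a little-endian 32-bit word, while the
rotator's SDE_PIX_FMT_* names follow memory byte order. DRM_FORMAT_ABGR8888
therefore lays out R,G,B,A in memory, which is what SDE calls RGBA_8888.
A standalone check, assuming a little-endian CPU:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		/* DRM_FORMAT_ABGR8888: [31:0] A:B:G:R, little endian */
		uint32_t px = 0xAABBCCDD;	/* A=aa B=bb G=cc R=dd */
		const uint8_t *b = (const uint8_t *)&px;

		/* memory order: R G B A -> prints "dd cc bb aa" */
		printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]);
		return 0;
	}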
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_rot.h b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
index 949f9bd..a4f5b49 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_rot.h
+++ b/drivers/gpu/drm/msm/sde/sde_hw_rot.h
@@ -20,12 +20,6 @@
 
 struct sde_hw_rot;
 
-/* tags for attachment */
-#define SDE_TAG_ROT_OUT_FBO	0x1000
-#define SDE_TAG_ROT_OUT_FB	0x1001
-#define SDE_TAG_ROT_PLANE	0x1002
-#define SDE_TAG_ROT_IN_FB	0x1003
-
 /**
  * enum sde_hw_rot_cmd_type - type of rotator hardware command
  * @SDE_HW_ROT_CMD_VALDIATE: validate rotator command; do not commit
@@ -124,6 +118,7 @@
 			struct sde_hw_rot *hw);
 	const char *(*get_downscale_caps)(struct sde_hw_rot *hw);
 	size_t (*get_cache_size)(struct sde_hw_rot *hw);
+	int (*get_maxlinewidth)(struct sde_hw_rot *hw);
 };
 
 /**
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
index 37fb81d..c045067 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -1156,8 +1156,9 @@
 	if (cfg->sblk->scaler_blk.len)
 		sde_dbg_reg_register_dump_range(SDE_DBG_NAME,
 			cfg->sblk->scaler_blk.name,
-			cfg->sblk->scaler_blk.base,
-			cfg->sblk->scaler_blk.base + cfg->sblk->scaler_blk.len,
+			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base,
+			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base +
+				cfg->sblk->scaler_blk.len,
 			hw_pipe->hw.xin_id);
 
 	return hw_pipe;
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
index a7bebc2..b20b3bc 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_top.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -292,6 +292,7 @@
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
 			mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
 			mdp->hw.xin_id);
+	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
 
 	_sde_hw_mdptop_init_ubwc(addr, m);
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
index e9f54d0..048ec47 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -158,8 +158,7 @@
 	c->cap = cfg;
 	_setup_vbif_ops(&c->ops, c->cap->features);
 
-	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
-			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+	/* no need to register sub-range in sde dbg, dump entire vbif io base */
 
 	return c;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 8bc6a2b..7e18a0e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -57,7 +57,7 @@
  * # echo 0x2 > /sys/module/drm/parameters/debug
  *
  * To enable DRM driver h/w logging
- * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
  *
  * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
  */
@@ -275,7 +275,13 @@
 
 void *sde_debugfs_get_root(struct sde_kms *sde_kms)
 {
-	return sde_kms ? sde_kms->dev->primary->debugfs_root : 0;
+	struct msm_drm_private *priv;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
+		return NULL;
+
+	priv = sde_kms->dev->dev_private;
+	return priv->debug_root;
 }
 
 static int _sde_debugfs_init(struct sde_kms *sde_kms)
@@ -405,11 +411,11 @@
 		if (encoder->crtc != crtc)
 			continue;
 		/*
-		 * Wait post-flush if necessary to delay before plane_cleanup
-		 * For example, wait for vsync in case of video mode panels
-		 * This should be a no-op for command mode panels
+		 * Wait for post-flush if necessary to delay before
+		 * plane_cleanup. For example, wait for vsync in case of video
+		 * mode panels. This may be a no-op for command mode panels.
 		 */
-		SDE_EVT32(DRMID(crtc));
+		SDE_EVT32_VERBOSE(DRMID(crtc));
 		ret = sde_encoder_wait_for_commit_done(encoder);
 		if (ret && ret != -EWOULDBLOCK) {
 			SDE_ERROR("wait for commit done returned %d\n", ret);
@@ -1226,7 +1232,7 @@
 /* the caller needs to turn on the clock before calling this api */
 static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
 {
-	return;
+	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
 }
 
 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 0be17e4..e8892fb 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -253,9 +253,10 @@
 			((src_width + 32) * fmt->bpp);
 	}
 
-	SDE_DEBUG("plane%u: pnum:%d fmt:%x w:%u fl:%u\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
 			plane->base.id, psde->pipe - SSPP_VIG0,
-			fmt->base.pixel_format, src_width, total_fl);
+			(char *)&fmt->base.pixel_format,
+			src_width, total_fl);
 
 	return total_fl;
 }
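
The format-string changes in this hunk print the fourcc pixel format as
four characters via %4.4s over the address of the 32-bit code, instead of
raw hex. A standalone illustration; it assumes a little-endian host, which
matches how the DRM fourcc codes are laid out:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* DRM_FORMAT_XRGB8888 is fourcc_code('X', 'R', '2', '4') */
		uint32_t fmt = (uint32_t)'X' | ((uint32_t)'R' << 8) |
			       ((uint32_t)'2' << 16) | ((uint32_t)'4' << 24);

		/* %4.4s prints exactly four bytes; no NUL terminator needed */
		printf("fmt: %4.4s\n", (char *)&fmt);	/* "XR24" */
		return 0;
	}
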
@@ -365,10 +366,10 @@
 			psde->is_rt_pipe, total_fl, qos_lut,
 			(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
 
-	SDE_DEBUG("plane%u: pnum:%d fmt:%x rt:%d fl:%u lut:0x%x\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%x\n",
 			plane->base.id,
 			psde->pipe - SSPP_VIG0,
-			(fmt) ? fmt->base.pixel_format : 0,
+			fmt ? (char *)&fmt->base.pixel_format : NULL,
 			psde->is_rt_pipe, total_fl, qos_lut);
 
 	psde->pipe_hw->ops.setup_creq_lut(psde->pipe_hw, &psde->pipe_qos_cfg);
@@ -427,10 +428,10 @@
 			psde->pipe_qos_cfg.danger_lut,
 			psde->pipe_qos_cfg.safe_lut);
 
-	SDE_DEBUG("plane%u: pnum:%d fmt:%x mode:%d luts[0x%x, 0x%x]\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
 		plane->base.id,
 		psde->pipe - SSPP_VIG0,
-		fmt ? fmt->base.pixel_format : 0,
+		fmt ? (char *)&fmt->base.pixel_format : NULL,
 		fmt ? fmt->fetch_mode : -1,
 		psde->pipe_qos_cfg.danger_lut,
 		psde->pipe_qos_cfg.safe_lut);
@@ -620,8 +621,6 @@
 			prefix = sde_sync_get_name_prefix(input_fence);
 			rc = sde_sync_wait(input_fence, wait_ms);
 
-			SDE_EVT32(DRMID(plane), -ret, prefix);
-
 			switch (rc) {
 			case 0:
 				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
@@ -648,6 +647,8 @@
 				ret = 0;
 				break;
 			}
+
+			SDE_EVT32_VERBOSE(DRMID(plane), -ret, prefix);
 		} else {
 			ret = 0;
 		}
@@ -1220,6 +1221,40 @@
 }
 
 /**
+ * _sde_plane_fb_get/put - framebuffer callback for crtc res ops
+ */
+static void *_sde_plane_fb_get(void *fb, u32 type, u64 tag)
+{
+	drm_framebuffer_reference(fb);
+	return fb;
+}
+static void _sde_plane_fb_put(void *fb)
+{
+	drm_framebuffer_unreference(fb);
+}
+static struct sde_crtc_res_ops fb_res_ops = {
+	.put = _sde_plane_fb_put,
+	.get = _sde_plane_fb_get,
+};
+
+/**
+ * _sde_plane_fbo_get/put - framebuffer object callback for crtc res ops
+ */
+static void *_sde_plane_fbo_get(void *fbo, u32 type, u64 tag)
+{
+	sde_kms_fbo_reference(fbo);
+	return fbo;
+}
+static void _sde_plane_fbo_put(void *fbo)
+{
+	sde_kms_fbo_unreference(fbo);
+}
+static struct sde_crtc_res_ops fbo_res_ops = {
+	.put = _sde_plane_fbo_put,
+	.get = _sde_plane_fbo_get,
+};
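
The two ops tables above let the CRTC resource manager take and drop
references on framebuffers and buffer objects without knowing their types;
only the get/put callbacks differ per resource. A minimal standalone
sketch of the callback-table shape (the real callbacks also receive a
type and tag, omitted here):

	#include <stdio.h>

	struct res_ops {
		void *(*get)(void *val);
		void (*put)(void *val);
	};

	static int fb_refs;
	static void *fb_get(void *val) { fb_refs++; return val; }
	static void fb_put(void *val) { fb_refs--; }

	static const struct res_ops fb_res_ops = { .get = fb_get, .put = fb_put };

	int main(void)
	{
		char fb;				/* stand-in framebuffer */
		void *held = fb_res_ops.get(&fb);	/* manager takes a reference */

		fb_res_ops.put(held);			/* manager drops it */
		printf("refs: %d\n", fb_refs);		/* 0 */
		return 0;
	}
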
+
+/**
  * sde_plane_rot_calc_prefill - calculate rotator start prefill
  * @plane: Pointer to drm plane
+ * return: prefill time in lines
@@ -1294,17 +1329,26 @@
 	struct sde_plane_state *pstate;
 	struct sde_plane_rot_state *rstate;
 	struct sde_hw_blk *hw_blk;
-	struct sde_hw_blk_attachment *attach;
+	struct drm_crtc_state *cstate;
 	struct drm_rect *in_rot, *out_rot;
+	struct drm_plane *attached_plane;
 	u32 dst_x, dst_y, dst_w, dst_h;
 	int found = 0;
 	int xpos = 0;
+	int ret;
 
 	if (!plane || !state || !state->state) {
 		SDE_ERROR("invalid parameters\n");
 		return;
 	}
 
+	cstate = _sde_plane_get_crtc_state(state);
+	if (IS_ERR_OR_NULL(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return;
+	}
+
 	pstate = to_sde_plane_state(state);
 	rstate = &pstate->rot;
 
@@ -1341,24 +1385,12 @@
 	hw_blk = &rstate->rot_hw->base;
 
 	/* enumerating over all planes attached to the same rotator */
-	list_for_each_entry(attach, &hw_blk->attach_list, list) {
-		struct drm_plane *attached_plane;
+	drm_atomic_crtc_state_for_each_plane(attached_plane, cstate) {
 		struct drm_plane_state *attached_state;
 		struct sde_plane_state *attached_pstate;
 		struct sde_plane_rot_state *attached_rstate;
 		struct drm_rect attached_out_rect;
 
-		if (attach->tag != SDE_TAG_ROT_PLANE)
-			continue;
-
-		attached_plane = attach->value;
-
-		found++;
-
-		/* skip itself */
-		if (attached_plane == plane)
-			continue;
-
 		attached_state = drm_atomic_get_existing_plane_state(
 				state->state, attached_plane);
 
@@ -1368,6 +1400,15 @@
 		attached_pstate = to_sde_plane_state(attached_state);
 		attached_rstate = &attached_pstate->rot;
 
+		if (attached_rstate->rot_hw != rstate->rot_hw)
+			continue;
+
+		found++;
+
+		/* skip itself */
+		if (attached_plane == plane)
+			continue;
+
 		/* find bounding rotator source roi */
 		if (attached_state->src_x < in_rot->x1)
 			in_rot->x1 = attached_state->src_x;
@@ -1583,31 +1624,25 @@
 		rstate->out_rotation &= ~DRM_REFLECT_Y;
 
 	SDE_DEBUG(
-		"plane%d.%d rot:%d/%c%c%c%c/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+		"plane%d.%d rot:%d/%c%c%c%c/%dx%d/%4.4s/%llx/%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id, hw_cmd,
 			rot_cmd->rot90 ? 'r' : '_',
 			rot_cmd->hflip ? 'h' : '_',
 			rot_cmd->vflip ? 'v' : '_',
 			rot_cmd->video_mode ? 'V' : 'C',
 			state->fb->width, state->fb->height,
-			state->fb->pixel_format >> 0,
-			state->fb->pixel_format >> 8,
-			state->fb->pixel_format >> 16,
-			state->fb->pixel_format >> 24,
+			(char *) &state->fb->pixel_format,
 			state->fb->modifier[0],
 			drm_rect_width(&rstate->in_rot_rect) >> 16,
 			drm_rect_height(&rstate->in_rot_rect) >> 16,
 			rstate->in_rot_rect.x1 >> 16,
 			rstate->in_rot_rect.y1 >> 16);
 
-	SDE_DEBUG("plane%d.%d sspp:%d/%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+	SDE_DEBUG("plane%d.%d sspp:%d/%x/%dx%d/%4.4s/%llx/%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id, hw_cmd,
 			rstate->out_rotation,
 			rstate->out_fb_width, rstate->out_fb_height,
-			rstate->out_fb_pixel_format >> 0,
-			rstate->out_fb_pixel_format >> 8,
-			rstate->out_fb_pixel_format >> 16,
-			rstate->out_fb_pixel_format >> 24,
+			(char *) &rstate->out_fb_pixel_format,
 			rstate->out_fb_modifier[0],
 			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
 			rstate->out_src_x >> 16, rstate->out_src_y >> 16);
@@ -1628,6 +1663,7 @@
 	struct drm_framebuffer *fb = new_state->fb;
 	struct sde_plane_state *new_pstate = to_sde_plane_state(new_state);
 	struct sde_plane_rot_state *new_rstate = &new_pstate->rot;
+	struct drm_crtc_state *cstate;
 	int ret;
 
 	SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n",
@@ -1639,6 +1675,13 @@
 	if (!new_rstate->out_sbuf || !new_rstate->rot_hw)
 		return 0;
 
+	cstate = _sde_plane_get_crtc_state(new_state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return ret;
+	}
+
 	/* need to re-calc based on all newly validated plane states */
 	sde_plane_rot_calc_cfg(plane, new_state);
 
@@ -1647,26 +1690,25 @@
 		struct sde_kms_fbo *fbo;
 		struct drm_framebuffer *fb;
 
-		fbo = sde_hw_blk_lookup_value(&new_rstate->rot_hw->base,
-				SDE_TAG_ROT_OUT_FBO, 0);
-		fb = sde_hw_blk_lookup_value(&new_rstate->rot_hw->base,
-				SDE_TAG_ROT_OUT_FB, 0);
+		fbo = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+				(u64) &new_rstate->rot_hw->base);
+		fb = sde_crtc_res_get(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+				(u64) &new_rstate->rot_hw->base);
 		if (fb && fbo) {
 			SDE_DEBUG("plane%d.%d get fb/fbo\n", plane->base.id,
 					new_rstate->sequence_id);
-
-			new_rstate->out_fbo = fbo;
-			sde_kms_fbo_reference(new_rstate->out_fbo);
-			sde_hw_blk_attach(&new_rstate->rot_hw->base,
-					SDE_TAG_ROT_OUT_FBO,
-					new_rstate->out_fbo);
-
-			new_rstate->out_fb = fb;
-			drm_framebuffer_reference(new_rstate->out_fb);
-			sde_hw_blk_attach(&new_rstate->rot_hw->base,
-					SDE_TAG_ROT_OUT_FB,
-					new_rstate->out_fb);
+		} else if (fbo) {
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+					(u64) &new_rstate->rot_hw->base);
+			fbo = NULL;
+		} else if (fb) {
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+					(u64) &new_rstate->rot_hw->base);
+			fb = NULL;
 		}
+
+		new_rstate->out_fbo = fbo;
+		new_rstate->out_fb = fb;
 	}
 
 	/* release buffer if output format configuration changes */
@@ -1682,13 +1724,11 @@
 		SDE_DEBUG("plane%d.%d release fb/fbo\n", plane->base.id,
 				new_rstate->sequence_id);
 
-		sde_hw_blk_detach(&new_rstate->rot_hw->base,
-				SDE_TAG_ROT_OUT_FB, new_rstate->out_fb);
-		drm_framebuffer_unreference(new_rstate->out_fb);
+		sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+				(u64) &new_rstate->rot_hw->base);
 		new_rstate->out_fb = NULL;
-		sde_hw_blk_detach(&new_rstate->rot_hw->base,
-				SDE_TAG_ROT_OUT_FBO, new_rstate->out_fbo);
-		sde_kms_fbo_unreference(new_rstate->out_fbo);
+		sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+				(u64) &new_rstate->rot_hw->base);
 		new_rstate->out_fbo = NULL;
 	}
 
@@ -1716,8 +1756,13 @@
 			goto error_create_fbo;
 		}
 
-		sde_hw_blk_attach(&new_rstate->rot_hw->base,
-				SDE_TAG_ROT_OUT_FBO, new_rstate->out_fbo);
+		ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+				(u64) &new_rstate->rot_hw->base,
+				new_rstate->out_fbo, &fbo_res_ops);
+		if (ret) {
+			SDE_ERROR("failed to add crtc resource\n");
+			goto error_create_fbo_res;
+		}
 
 		new_rstate->out_fb = sde_kms_fbo_create_fb(plane->dev,
 				new_rstate->out_fbo);
@@ -1727,8 +1772,13 @@
 			goto error_create_fb;
 		}
 
-		sde_hw_blk_attach(&new_rstate->rot_hw->base,
-				SDE_TAG_ROT_OUT_FB, new_rstate->out_fb);
+		ret = sde_crtc_res_add(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+				(u64) &new_rstate->rot_hw->base,
+				new_rstate->out_fb, &fb_res_ops);
+		if (ret) {
+			SDE_ERROR("failed to add crtc resource %d\n", ret);
+			goto error_create_fb_res;
+		}
 	}
 
 	/* prepare rotator input buffer */
@@ -1756,14 +1806,14 @@
 error_prepare_output_buffer:
 	msm_framebuffer_cleanup(new_state->fb, new_rstate->mmu_id);
 error_prepare_input_buffer:
-	sde_hw_blk_detach(&new_rstate->rot_hw->base, SDE_TAG_ROT_OUT_FB,
-			new_rstate->out_fb);
-	drm_framebuffer_unreference(new_rstate->out_fb);
+	sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+			(u64) &new_rstate->rot_hw->base);
+error_create_fb_res:
 	new_rstate->out_fb = NULL;
 error_create_fb:
-	sde_hw_blk_detach(&new_rstate->rot_hw->base, SDE_TAG_ROT_OUT_FBO,
-			new_rstate->out_fbo);
-	sde_kms_fbo_unreference(new_rstate->out_fbo);
+	sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+			(u64) &new_rstate->rot_hw->base);
+error_create_fbo_res:
 	new_rstate->out_fbo = NULL;
 error_create_fbo:
 	return ret;
@@ -1782,6 +1832,7 @@
 	struct sde_plane_state *old_pstate = to_sde_plane_state(old_state);
 	struct sde_plane_rot_state *old_rstate = &old_pstate->rot;
 	struct sde_hw_rot_cmd *cmd = &old_rstate->rot_cmd;
+	struct drm_crtc_state *cstate;
 	int ret;
 
 	SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", plane->base.id,
@@ -1792,6 +1843,13 @@
 	if (!old_rstate->out_sbuf || !old_rstate->rot_hw)
 		return;
 
+	cstate = _sde_plane_get_crtc_state(old_state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return;
+	}
+
 	if (sde_plane_crtc_enabled(old_state)) {
 		ret = old_rstate->rot_hw->ops.commit(old_rstate->rot_hw, cmd,
 				SDE_HW_ROT_CMD_CLEANUP);
@@ -1803,15 +1861,11 @@
 		if (old_rstate->out_fb) {
 			msm_framebuffer_cleanup(old_rstate->out_fb,
 					old_rstate->mmu_id);
-			sde_hw_blk_detach(&old_rstate->rot_hw->base,
-					SDE_TAG_ROT_OUT_FB,
-					old_rstate->out_fb);
-			drm_framebuffer_unreference(old_rstate->out_fb);
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FB,
+					(u64) &old_rstate->rot_hw->base);
 			old_rstate->out_fb = NULL;
-			sde_hw_blk_detach(&old_rstate->rot_hw->base,
-					SDE_TAG_ROT_OUT_FBO,
-					old_rstate->out_fbo);
-			sde_kms_fbo_unreference(old_rstate->out_fbo);
+			sde_crtc_res_put(cstate, SDE_CRTC_RES_ROT_OUT_FBO,
+					(u64) &old_rstate->rot_hw->base);
 			old_rstate->out_fbo = NULL;
 		}
 
@@ -1831,6 +1885,7 @@
 	struct sde_plane *psde;
 	struct sde_plane_state *pstate, *old_pstate;
 	struct sde_plane_rot_state *rstate, *old_rstate;
+	struct drm_crtc_state *cstate;
 	struct sde_hw_blk *hw_blk;
 	int i, ret = 0;
 
@@ -1845,6 +1900,14 @@
 	rstate = &pstate->rot;
 	old_rstate = &old_pstate->rot;
 
+	/* cstate will be null if crtc is disconnected from plane */
+	cstate = _sde_plane_get_crtc_state(state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return ret;
+	}
+
 	SDE_DEBUG("plane%d.%d FB[%u] sbuf:%d rot:%d crtc:%d\n", plane->base.id,
 			rstate->sequence_id, state->fb ? state->fb->base.id : 0,
 			!!rstate->out_sbuf, !!rstate->rot_hw,
@@ -1852,70 +1915,40 @@
 
 	rstate->in_rotation = drm_rotation_simplify(
 			sde_plane_get_property(pstate, PLANE_PROP_ROTATION),
-			DRM_ROTATE_90 | DRM_REFLECT_X | DRM_REFLECT_Y);
+			DRM_ROTATE_0 | DRM_ROTATE_90 |
+			DRM_REFLECT_X | DRM_REFLECT_Y);
 	rstate->rot90 = rstate->in_rotation & DRM_ROTATE_90 ? true : false;
 	rstate->hflip = rstate->in_rotation & DRM_REFLECT_X ? true : false;
 	rstate->vflip = rstate->in_rotation & DRM_REFLECT_Y ? true : false;
 	rstate->out_sbuf = psde->sbuf_mode || rstate->rot90;
 
-	if ((!sde_plane_enabled(state) || !rstate->out_sbuf) &&
-			rstate->rot_hw) {
-
-		SDE_DEBUG("plane%d.%d release rotator\n",
+	if (sde_plane_enabled(state) && rstate->out_sbuf) {
+		SDE_DEBUG("plane%d.%d acquire rotator\n",
 				plane->base.id, rstate->sequence_id);
 
-		sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
-				rstate->in_fb);
-		rstate->in_fb = NULL;
-		sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
-				plane);
-		sde_hw_rot_put(rstate->rot_hw);
-		rstate->rot_hw = NULL;
-
-	} else if (sde_plane_enabled(state) && rstate->out_sbuf &&
-			!rstate->rot_hw) {
-
-		SDE_DEBUG("plane%d.%d allocate rotator\n",
-				plane->base.id, rstate->sequence_id);
-
-		hw_blk = sde_hw_blk_lookup_blk(SDE_TAG_ROT_IN_FB, state->fb,
-				SDE_HW_BLK_ROT);
-		if (hw_blk)
-			rstate->rot_hw = to_sde_hw_rot(hw_blk);
-		else
-			rstate->rot_hw = sde_hw_rot_get(NULL);
-
-		if (!rstate->rot_hw) {
+		hw_blk = sde_crtc_res_get(cstate, SDE_HW_BLK_ROT,
+				(u64) state->fb);
+		if (!hw_blk) {
 			SDE_ERROR("plane%d no available rotator\n",
 					plane->base.id);
 			return -EINVAL;
 		}
 
+		rstate->rot_hw = to_sde_hw_rot(hw_blk);
+
 		if (!rstate->rot_hw->ops.commit) {
 			SDE_ERROR("plane%d invalid rotator ops\n",
 					plane->base.id);
-			sde_hw_rot_put(rstate->rot_hw);
+			sde_crtc_res_put(cstate,
+					SDE_HW_BLK_ROT, (u64) state->fb);
 			rstate->rot_hw = NULL;
 			return -EINVAL;
 		}
 
 		rstate->in_fb = state->fb;
-		sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
-				rstate->in_fb);
-		sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
-				plane);
-
-	} else if (sde_plane_enabled(state) && rstate->out_sbuf &&
-			(rstate->in_fb != state->fb)) {
-
-		SDE_DEBUG("plane%d.%d update fb\n",
-				plane->base.id, rstate->sequence_id);
-
-		sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
-				rstate->in_fb);
-		rstate->in_fb = state->fb;
-		sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
-				rstate->in_fb);
+	} else {
+		rstate->in_fb = NULL;
+		rstate->rot_hw = NULL;
 	}
 
 	if (sde_plane_enabled(state) && rstate->out_sbuf && rstate->rot_hw) {
@@ -2008,16 +2041,6 @@
 			rstate->sequence_id,
 			!!rstate->out_sbuf, !!rstate->rot_hw,
 			sde_plane_crtc_enabled(state));
-
-	if (rstate->rot_hw) {
-		sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
-				rstate->in_fb);
-		rstate->in_fb = NULL;
-		sde_hw_blk_detach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
-				plane);
-		sde_hw_rot_put(rstate->rot_hw);
-		rstate->rot_hw = NULL;
-	}
 }
 
 /**
@@ -2031,6 +2054,8 @@
 {
 	struct sde_plane_state *pstate  = to_sde_plane_state(new_state);
 	struct sde_plane_rot_state *rstate = &pstate->rot;
+	struct drm_crtc_state *cstate;
+	int ret;
 
 	rstate->sequence_id++;
 
@@ -2038,14 +2063,19 @@
 			rstate->sequence_id,
 			!!rstate->out_sbuf, !!rstate->rot_hw);
 
-	if (rstate->rot_hw) {
-		sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_IN_FB,
-				rstate->in_fb);
-		sde_hw_blk_attach(&rstate->rot_hw->base, SDE_TAG_ROT_PLANE,
-				plane);
-		sde_hw_rot_get(rstate->rot_hw);
+	cstate = _sde_plane_get_crtc_state(new_state);
+	if (IS_ERR(cstate)) {
+		ret = PTR_ERR(cstate);
+		SDE_ERROR("invalid crtc state %d\n", ret);
+		return -EINVAL;
 	}
 
+	if (rstate->rot_hw && cstate)
+		sde_crtc_res_get(cstate, SDE_HW_BLK_ROT, (u64) rstate->in_fb);
+	else if (rstate->rot_hw && !cstate)
+		SDE_ERROR("plane%d.%d zombie rotator hw\n",
+				plane->base.id, rstate->sequence_id);
+
 	rstate->out_fb = NULL;
 	rstate->out_fbo = NULL;
 
@@ -2108,6 +2138,10 @@
 		sde_kms_info_add_keyint(info, "cache_size",
 				rot_hw->ops.get_cache_size(rot_hw));
 
+	if (rot_hw->ops.get_maxlinewidth)
+		sde_kms_info_add_keyint(info, "max_linewidth",
+				rot_hw->ops.get_maxlinewidth(rot_hw));
+
 	msm_property_set_blob(&psde->property_info, &psde->blob_rot_caps,
 			info->data, info->len, PLANE_PROP_ROT_CAPS_V1);
 
@@ -2126,7 +2160,8 @@
 	struct sde_mdss_cfg *catalog)
 {
 	struct sde_plane *psde = to_sde_plane(plane);
-	unsigned long supported_rotations = DRM_REFLECT_X | DRM_REFLECT_Y;
+	unsigned long supported_rotations = DRM_ROTATE_0 | DRM_REFLECT_X |
+			DRM_REFLECT_Y;
 
 	if (!plane || !psde) {
 		SDE_ERROR("invalid plane\n");
@@ -2195,13 +2230,10 @@
 	nplanes = fmt->num_planes;
 
 	SDE_DEBUG(
-		"plane%d.%d sspp:%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
+		"plane%d.%d sspp:%dx%d/%4.4s/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id,
 			rstate->out_fb_width, rstate->out_fb_height,
-			rstate->out_fb_pixel_format >> 0,
-			rstate->out_fb_pixel_format >> 8,
-			rstate->out_fb_pixel_format >> 16,
-			rstate->out_fb_pixel_format >> 24,
+			(char *) &rstate->out_fb_pixel_format,
 			rstate->out_fb_modifier[0],
 			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
 			rstate->out_src_x >> 16, rstate->out_src_y >> 16,
@@ -2272,13 +2304,10 @@
 			state->crtc_w, state->crtc_h, !q16_data);
 
 		SDE_DEBUG_PLANE(psde,
-			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %c%c%c%c ubwc %d\n",
+			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %4.4s ubwc %d\n",
 				fb->base.id, src.x, src.y, src.w, src.h,
 				crtc->base.id, dst.x, dst.y, dst.w, dst.h,
-				fmt->base.pixel_format >> 0,
-				fmt->base.pixel_format >> 8,
-				fmt->base.pixel_format >> 16,
-				fmt->base.pixel_format >> 24,
+				(char *)&fmt->base.pixel_format,
 				SDE_FORMAT_IS_UBWC(fmt));
 
 		if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
@@ -2731,14 +2760,11 @@
 		goto modeset_update;
 
 	SDE_DEBUG(
-		"plane%d.%u sspp:%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d crtc:%dx%d+%d+%d\n",
+		"plane%d.%u sspp:%x/%dx%d/%4.4s/%llx/%dx%d+%d+%d crtc:%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id,
 			rstate->out_rotation,
 			rstate->out_fb_width, rstate->out_fb_height,
-			rstate->out_fb_pixel_format >> 0,
-			rstate->out_fb_pixel_format >> 8,
-			rstate->out_fb_pixel_format >> 16,
-			rstate->out_fb_pixel_format >> 24,
+			(char *) &rstate->out_fb_pixel_format,
 			rstate->out_fb_modifier[0],
 			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
 			rstate->out_src_x >> 16, rstate->out_src_y >> 16,
@@ -2830,11 +2856,11 @@
 		sde_kms_rect_intersect(&intersect, &src, &pstate->excl_rect);
 		if (!intersect.w || !intersect.h || SDE_FORMAT_IS_YUV(fmt)) {
 			SDE_ERROR_PLANE(psde,
-				"invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt:%s\n",
+				"invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt: %4.4s\n",
 				pstate->excl_rect.x, pstate->excl_rect.y,
 				pstate->excl_rect.w, pstate->excl_rect.h,
 				src.x, src.y, src.w, src.h,
-				drm_get_format_name(fmt->base.pixel_format));
+				(char *)&fmt->base.pixel_format);
 			ret = -EINVAL;
 		}
 	}
@@ -3531,10 +3557,6 @@
 	msm_property_duplicate_state(&psde->property_info, old_state, pstate,
 			pstate->property_values, pstate->property_blobs);
 
-	/* add ref count for frame buffer */
-	if (pstate->base.fb)
-		drm_framebuffer_reference(pstate->base.fb);
-
 	/* clear out any input fence */
 	pstate->input_fence = 0;
 	input_fence_default = msm_property_get_default(
@@ -3545,6 +3567,8 @@
 	pstate->dirty = 0x0;
 	pstate->pending = false;
 
+	__drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
 	sde_plane_rot_duplicate_state(plane, &pstate->base);
 
 	return &pstate->base;
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 40e02b8..a4b918e 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>
 #include <linux/dma-buf.h>
 #include <linux/slab.h>
+#include <linux/list_sort.h>
 
 #include "sde_dbg.h"
 #include "sde/sde_hw_catalog.h"
@@ -41,7 +42,7 @@
 #define DBGBUS_NAME_SDE		"sde"
 #define DBGBUS_NAME_VBIF_RT	"vbif_rt"
 
-/* offsets from sde_base address for the debug buses */
+/* offsets from sde top address for the debug buses */
 #define DBGBUS_SSPP0	0x188
 #define DBGBUS_SSPP1	0x298
 #define DBGBUS_DSPP	0x348
@@ -54,6 +55,9 @@
 #define MMSS_VBIF_TEST_BUS_OUT_CTRL	0x210
 #define MMSS_VBIF_TEST_BUS_OUT		0x230
 
+/* print debug ranges in groups of 4 u32s */
+#define REG_DUMP_ALIGN		16
+
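
REG_DUMP_ALIGN drives the usual round-up-to-multiple arithmetic used
later in _sde_dump_reg(): len_align counts 16-byte groups and len_padded
is the allocation size. A standalone check of the math:

	#include <stdio.h>

	#define REG_DUMP_ALIGN 16

	int main(void)
	{
		unsigned int len_bytes = 20;
		unsigned int len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
		unsigned int len_padded = len_align * REG_DUMP_ALIGN;

		/* 20 bytes -> 2 groups of 16 -> 32 padded bytes */
		printf("%u -> %u groups, %u bytes\n", len_bytes, len_align, len_padded);
		return 0;
	}
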
 /**
  * struct sde_dbg_reg_offset - tracking for start and end of region
  * @start: start offset
@@ -135,6 +139,7 @@
 struct sde_dbg_sde_debug_bus {
 	struct sde_dbg_debug_bus_common cmn;
 	struct sde_debug_bus_entry *entries;
+	u32 top_blk_off;
 };
 
 struct sde_dbg_vbif_debug_bus {
@@ -146,7 +151,6 @@
  * struct sde_dbg_base - global sde debug base structure
  * @evtlog: event log instance
  * @reg_base_list: list of register dumping regions
- * @root: base debugfs root
  * @dev: device pointer
  * @power_ctrl: callback structure for enabling power for reading hw registers
  * @req_dump_blks: list of blocks requested for dumping
@@ -160,7 +164,6 @@
 static struct sde_dbg_base {
 	struct sde_dbg_evtlog *evtlog;
 	struct list_head reg_base_list;
-	struct dentry *root;
 	struct device *dev;
 	struct sde_dbg_power_ctrl power_ctrl;
 
@@ -1961,15 +1964,17 @@
  * _sde_dump_reg - helper function for dumping rotator register set content
  * @dump_name: register set name
  * @reg_dump_flag: dumping flag controlling in-log/memory dump location
+ * @base_addr: starting address of io region for calculating offsets to print
  * @addr: starting address offset for dumping
  * @len_bytes: range of the register set
  * @dump_mem: output buffer for memory dump location option
  * @from_isr: whether being called from isr context
  */
-static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag, char *addr,
-		size_t len_bytes, u32 **dump_mem, bool from_isr)
+static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
+		char *base_addr, char *addr, size_t len_bytes, u32 **dump_mem,
+		bool from_isr)
 {
-	u32 in_log, in_mem, len_align_16, len_bytes_aligned;
+	u32 in_log, in_mem, len_align, len_padded;
 	u32 *dump_addr = NULL;
 	char *end_addr;
 	int i;
@@ -1980,28 +1985,33 @@
 	in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
 	in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
 
-	pr_debug("reg_dump_flag=%d in_log=%d in_mem=%d\n",
-		reg_dump_flag, in_log, in_mem);
+	pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n",
+		dump_name, reg_dump_flag, in_log, in_mem);
 
 	if (!in_log && !in_mem)
 		return;
 
-	len_align_16 = (len_bytes + 15) / 16;
-	len_bytes_aligned = len_align_16 * 16;
+	if (in_log)
+		dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n",
+				dump_name, addr - base_addr, len_bytes);
+
+	len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
+	len_padded = len_align * REG_DUMP_ALIGN;
 	end_addr = addr + len_bytes;
 
 	if (in_mem) {
 		if (dump_mem && !(*dump_mem)) {
 			phys_addr_t phys = 0;
 			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
-					len_bytes_aligned, &phys, GFP_KERNEL);
+					len_padded, &phys, GFP_KERNEL);
 		}
 
 		if (dump_mem && *dump_mem) {
 			dump_addr = *dump_mem;
-			pr_info("%s: start_addr:0x%pK end_addr:0x%pK reg_addr=0x%pK\n",
-				dump_name, dump_addr,
-				dump_addr + len_bytes_aligned, addr);
+			dev_info(sde_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n",
+				dump_name, dump_addr, len_padded,
+				addr - base_addr);
 		} else {
 			in_mem = 0;
 			pr_err("dump_mem: kzalloc fails!\n");
@@ -2011,7 +2021,7 @@
 	if (!from_isr)
 		_sde_dbg_enable_power(true);
 
-	for (i = 0; i < len_align_16; i++) {
+	for (i = 0; i < len_align; i++) {
 		u32 x0, x4, x8, xc;
 
 		x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
@@ -2020,8 +2030,9 @@
 		xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
 
 		if (in_log)
-			pr_info("%pK : %08x %08x %08x %08x\n", addr, x0, x4, x8,
-				xc);
+			dev_info(sde_dbg_base.dev,
+					"0x%lx : %08x %08x %08x %08x\n",
+					addr - base_addr, x0, x4, x8, xc);
 
 		if (dump_addr) {
 			dump_addr[i * 4] = x0;
@@ -2030,7 +2041,7 @@
 			dump_addr[i * 4 + 3] = xc;
 		}
 
-		addr += 16;
+		addr += REG_DUMP_ALIGN;
 	}
 
 	if (!from_isr)
@@ -2059,6 +2070,20 @@
 	return length;
 }
 
+static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
+		struct list_head *b)
+{
+	struct sde_dbg_reg_range *ar, *br;
+
+	if (!a || !b)
+		return 0;
+
+	ar = container_of(a, struct sde_dbg_reg_range, head);
+	br = container_of(b, struct sde_dbg_reg_range, head);
+
+	return ar->offset.start - br->offset.start;
+}
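
One caveat on the comparator above: returning the difference of two u32
offsets as an int can wrap if the offsets are more than 2^31 apart.
Register offsets are far smaller in practice, but a sign-safe comparison
avoids the question entirely; a standalone sketch:

	#include <stdio.h>

	static int cmp_u32(unsigned int a, unsigned int b)
	{
		return (a > b) - (a < b);	/* -1, 0 or 1; immune to wraparound */
	}

	int main(void)
	{
		printf("%d %d %d\n", cmp_u32(1, 2), cmp_u32(2, 2), cmp_u32(3, 2));
		return 0;
	}
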
+
 /**
  * _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base
  * @dbg: register blk base structure
@@ -2076,10 +2101,13 @@
 		return;
 	}
 
-	pr_info("%s:=========%s DUMP=========\n", __func__, dbg->name);
+	dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__,
+			dbg->name);
 
 	/* If there is a list to dump the registers by ranges, use the ranges */
 	if (!list_empty(&dbg->sub_range_list)) {
+		/* sort the list by start address first */
+		list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp);
 		list_for_each_entry(range_node, &dbg->sub_range_list, head) {
 			len = _sde_dbg_get_dump_range(&range_node->offset,
 				dbg->max_offset);
@@ -2089,18 +2117,20 @@
 				addr, range_node->offset.start,
 				range_node->offset.end);
 
-			_sde_dump_reg((const char *)range_node->range_name,
-				reg_dump_flag, addr, len, &range_node->reg_dump,
-				false);
+			_sde_dump_reg(range_node->range_name, reg_dump_flag,
+					dbg->base, addr, len,
+					&range_node->reg_dump, false);
 		}
 	} else {
 		/* If there is no list to dump ranges, dump all registers */
-		pr_info("Ranges not found, will dump full registers\n");
-		pr_info("base:0x%pK len:0x%zx\n", dbg->base, dbg->max_offset);
+		dev_info(sde_dbg_base.dev,
+				"Ranges not found, will dump full registers\n");
+		dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base,
+				dbg->max_offset);
 		addr = dbg->base;
 		len = dbg->max_offset;
-		_sde_dump_reg((const char *)dbg->name, reg_dump_flag, addr,
-			len, &dbg->reg_dump, false);
+		_sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
+				&dbg->reg_dump, false);
 	}
 }
 
@@ -2180,7 +2210,7 @@
 			reg_base_head)
 		if (strlen(reg_base->name) &&
 			!strcmp(reg_base->name, bus->cmn.name))
-			mem_base = reg_base->base;
+			mem_base = reg_base->base + bus->top_blk_off;
 
 	if (!mem_base) {
 		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
@@ -2198,7 +2228,8 @@
 	if (!in_log && !in_mem)
 		return;
 
-	pr_info("======== start %s dump =========\n", bus->cmn.name);
+	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
 
 	if (in_mem) {
 		if (!(*dump_mem))
@@ -2207,8 +2238,9 @@
 
 		if (*dump_mem) {
 			dump_addr = *dump_mem;
-			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
-				__func__, dump_addr, dump_addr + list_size);
+			dev_info(sde_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
 		} else {
 			in_mem = false;
 			pr_err("dump_mem: allocation fails\n");
@@ -2230,9 +2262,10 @@
 		status = readl_relaxed(mem_base + offset);
 
 		if (in_log)
-			pr_err("waddr=0x%x blk=%d tst=%d val=0x%x\n",
-				head->wr_addr, head->block_id, head->test_id,
-				status);
+			dev_info(sde_dbg_base.dev,
+					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
+					head->wr_addr, head->block_id,
+					head->test_id, status);
 
 		if (dump_addr && in_mem) {
 			dump_addr[i*4]     = head->wr_addr;
@@ -2247,7 +2280,8 @@
 	}
 	_sde_dbg_enable_power(false);
 
-	pr_info("======== end %s dump =========\n", bus->cmn.name);
+	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
 }
 
 static void _sde_dbg_dump_vbif_debug_bus_entry(
@@ -2277,7 +2311,8 @@
 				*dump_addr++ = val;
 			}
 			if (in_log)
-				pr_err("testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+				dev_info(sde_dbg_base.dev,
+					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
 					head->block_bus_addr, i, j, val);
 		}
 	}
@@ -2316,7 +2351,8 @@
 	list_size = bus->cmn.entries_size;
 	dump_mem = &bus->cmn.dumped_content;
 
-	pr_info("======== start %s dump =========\n", bus->cmn.name);
+	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
+			bus->cmn.name);
 
 	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
 		return;
@@ -2343,8 +2379,9 @@
 
 		if (*dump_mem) {
 			dump_addr = *dump_mem;
-			pr_info("%s: start_addr:0x%pK end_addr:0x%pK\n",
-				__func__, dump_addr, dump_addr + list_size);
+			dev_info(sde_dbg_base.dev,
+				"%s: start_addr:0x%pK len:0x%x\n",
+				__func__, dump_addr, list_size);
 		} else {
 			in_mem = false;
 			pr_err("dump_mem: allocation fails\n");
@@ -2375,7 +2412,8 @@
 
 	_sde_dbg_enable_power(false);
 
-	pr_info("======== end %s dump =========\n", bus->cmn.name);
+	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
+			bus->cmn.name);
 }
 
 /**
@@ -2863,24 +2901,19 @@
 	struct sde_dbg_reg_base *blk_base;
 	char debug_name[80] = "";
 
-	sde_dbg_base.root = debugfs_create_dir("evt_dbg", debugfs_root);
-	if (IS_ERR_OR_NULL(sde_dbg_base.root)) {
-		pr_err("debugfs_create_dir fail, error %ld\n",
-		       PTR_ERR(sde_dbg_base.root));
-		sde_dbg_base.root = NULL;
-		return -ENODEV;
-	}
+	if (!debugfs_root)
+		return -EINVAL;
 
-	debugfs_create_file("dump", 0644, sde_dbg_base.root, NULL,
+	debugfs_create_file("dump", 0644, debugfs_root, NULL,
 			&sde_evtlog_fops);
-	debugfs_create_u32("enable", 0644, sde_dbg_base.root,
+	debugfs_create_u32("enable", 0644, debugfs_root,
 			&(sde_dbg_base.evtlog->enable));
-	debugfs_create_file("filter", 0644, sde_dbg_base.root,
+	debugfs_create_file("filter", 0644, debugfs_root,
 			sde_dbg_base.evtlog,
 			&sde_evtlog_filter_fops);
-	debugfs_create_u32("panic", 0644, sde_dbg_base.root,
+	debugfs_create_u32("panic", 0644, debugfs_root,
 			&sde_dbg_base.panic_on_err);
-	debugfs_create_u32("reg_dump", 0644, sde_dbg_base.root,
+	debugfs_create_u32("reg_dump", 0644, debugfs_root,
 			&sde_dbg_base.enable_reg_dump);
 
 	if (dbg->dbgbus_sde.entries) {
@@ -2888,7 +2921,7 @@
 		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
 				dbg->dbgbus_sde.cmn.name);
 		dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
-		debugfs_create_u32(debug_name, 0644, dbg->root,
+		debugfs_create_u32(debug_name, 0644, debugfs_root,
 				&dbg->dbgbus_sde.cmn.enable_mask);
 	}
 
@@ -2897,36 +2930,28 @@
 		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
 				dbg->dbgbus_vbif_rt.cmn.name);
 		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
-		debugfs_create_u32(debug_name, 0644, dbg->root,
+		debugfs_create_u32(debug_name, 0644, debugfs_root,
 				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
 	}
 
 	list_for_each_entry(blk_base, &dbg->reg_base_list, reg_base_head) {
 		snprintf(debug_name, sizeof(debug_name), "%s_off",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0644, dbg->root, blk_base,
+		debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
 				&sde_off_fops);
 
 		snprintf(debug_name, sizeof(debug_name), "%s_reg",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0644, dbg->root, blk_base,
+		debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
 				&sde_reg_fops);
 	}
 
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static void _sde_dbg_debugfs_destroy(void)
-{
-	debugfs_remove_recursive(sde_dbg_base.root);
-	sde_dbg_base.root = 0;
-}
-#else
 static void _sde_dbg_debugfs_destroy(void)
 {
 }
-#endif
 
 void sde_dbg_init_dbg_buses(u32 hwversion)
 {
@@ -3051,6 +3076,21 @@
 		return;
 	}
 
+	if (!range_name || strlen(range_name) == 0) {
+		pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n",
+				__builtin_return_address(0), base_name,
+				offset_start, offset_end);
+		return;
+	}
+
+	if (offset_end - offset_start < REG_DUMP_ALIGN ||
+			offset_start > offset_end) {
+		pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n",
+				__builtin_return_address(0), base_name,
+				range_name, offset_start, offset_end);
+		return;
+	}
+
 	range = kzalloc(sizeof(*range), GFP_KERNEL);
 	if (!range)
 		return;
@@ -3061,6 +3101,12 @@
 	range->xin_id = xin_id;
 	list_add_tail(&range->head, &reg_base->sub_range_list);
 
-	pr_debug("%s start: 0x%X end: 0x%X\n", range->range_name,
+	pr_debug("base %s, range %s, start 0x%X, end 0x%X\n",
+			base_name, range->range_name,
 			range->offset.start, range->offset.end);
 }
+
+void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+	sde_dbg_base.dbgbus_sde.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 4344eb8..02d46c7 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -20,6 +20,13 @@
 #define SDE_EVTLOG_DATA_LIMITER	(-1)
 #define SDE_EVTLOG_FUNC_ENTRY	0x1111
 #define SDE_EVTLOG_FUNC_EXIT	0x2222
+#define SDE_EVTLOG_FUNC_CASE1	0x3333
+#define SDE_EVTLOG_FUNC_CASE2	0x4444
+#define SDE_EVTLOG_FUNC_CASE3	0x5555
+#define SDE_EVTLOG_FUNC_CASE4	0x6666
+#define SDE_EVTLOG_FUNC_CASE5	0x7777
+#define SDE_EVTLOG_PANIC	0xdead
+#define SDE_EVTLOG_FATAL	0xbad
 
 #define SDE_DBG_DUMP_DATA_LIMITER (NULL)
 
@@ -36,7 +43,7 @@
 };
 
 #ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
-#define SDE_EVTLOG_DEFAULT_ENABLE SDE_EVTLOG_CRITICAL
+#define SDE_EVTLOG_DEFAULT_ENABLE (SDE_EVTLOG_CRITICAL | SDE_EVTLOG_IRQ)
 #else
 #define SDE_EVTLOG_DEFAULT_ENABLE 0
 #endif
@@ -258,6 +265,13 @@
 		uint32_t xin_id);
 
 /**
+ * sde_dbg_set_sde_top_offset - set the target specific offset from mdss base
+ *	address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void sde_dbg_set_sde_top_offset(u32 blk_off);
+
+/**
  * sde_evtlog_set_filter - update evtlog filtering
  * @evtlog:	pointer to evtlog
  * @filter:     pointer to optional function name filter, set to NULL to disable
@@ -341,6 +355,10 @@
 {
 }
 
+static inline void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+}
+
 static inline void sde_evtlog_set_filter(
 		struct sde_dbg_evtlog *evtlog, char *filter)
 {
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index a9a7d4f..c1b812a 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -413,12 +413,6 @@
 		if (client->current_state == SDE_RSC_VID_STATE)
 			goto end;
 
-	/* no need to enable solver again */
-	if (rsc->current_state == SDE_RSC_CLK_STATE) {
-		rc = 0;
-		goto end;
-	}
-
 	if (rsc->hw_ops.state_update)
 		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
 
@@ -440,14 +434,8 @@
 		    (client->current_state == SDE_RSC_CMD_STATE))
 			goto end;
 
-	/* no need to enable the solver again */
-	if (rsc->current_state == SDE_RSC_CMD_STATE) {
-		rc = 0;
-		goto end;
-	}
-
 	if (rsc->hw_ops.state_update)
-		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CMD_STATE);
+		rc = rsc->hw_ops.state_update(rsc, SDE_RSC_CLK_STATE);
 end:
 	return rc;
 }
@@ -1086,6 +1074,7 @@
 	sde_rsc_clk_enable(&rsc->phandle, rsc->pclient, false);
 
 	INIT_LIST_HEAD(&rsc->client_list);
+	INIT_LIST_HEAD(&rsc->event_list);
 	mutex_init(&rsc->client_lock);
 
 	pr_info("sde rsc index:%d probed successfully\n",
@@ -1095,6 +1084,7 @@
 	snprintf(name, MAX_RSC_CLIENT_NAME_LEN, "%s%d", "sde_rsc", counter);
 	_sde_rsc_init_debugfs(rsc, name);
 	counter++;
+	rsc->power_collapse = true;
 
 	ret = component_add(&pdev->dev, &sde_rsc_comp_ops);
 	if (ret)
diff --git a/drivers/gpu/drm/msm/sde_rsc_hw.c b/drivers/gpu/drm/msm/sde_rsc_hw.c
index fb963ee..de579c1 100644
--- a/drivers/gpu/drm/msm/sde_rsc_hw.c
+++ b/drivers/gpu/drm/msm/sde_rsc_hw.c
@@ -314,6 +314,20 @@
 	wrapper_status |= BIT(0);
 	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_CTRL,
 					wrapper_status, rsc->debug_mode);
+
+	/*
+	 * Force busy and idle during the clk & video mode states because
+	 * the sequence tries to enter mode-2 without turning on the vsync.
+	 */
+	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+			(rsc->current_state == SDE_RSC_CLK_STATE)) {
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+				BIT(0) | BIT(1), rsc->debug_mode);
+		wmb(); /* force busy guarantee */
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+				BIT(0) | BIT(9), rsc->debug_mode);
+	}
+
 	/* make sure that mode-2 is triggered before wait */
 	wmb();
 
@@ -331,6 +345,13 @@
 		goto end;
 	}
 
+	if ((rsc->current_state == SDE_RSC_VID_STATE) ||
+			(rsc->current_state == SDE_RSC_CLK_STATE)) {
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+					BIT(0) | BIT(8), rsc->debug_mode);
+		wmb(); /* force busy on vsync */
+	}
+
 	rsc_event_trigger(rsc, SDE_RSC_EVENT_POST_CORE_PC);
 
 	return 0;
@@ -343,13 +364,26 @@
 	return rc;
 }
 
-int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc)
+int sde_rsc_mode2_exit(struct sde_rsc_priv *rsc, enum sde_rsc_state state)
 {
 	int rc = -EBUSY;
 	int count, reg;
 
 	rsc_event_trigger(rsc, SDE_RSC_EVENT_PRE_CORE_RESTORE);
 
+	/*
+	 * Force busy and idle during the clk & video mode states because
+	 * the sequence tries to enter mode-2 without turning on the vsync.
+	 */
+	if ((state == SDE_RSC_VID_STATE) || (state == SDE_RSC_CLK_STATE)) {
+		reg = dss_reg_r(&rsc->wrapper_io,
+			SDE_RSCC_WRAPPER_OVERRIDE_CTRL, rsc->debug_mode);
+		reg |= BIT(8);
+		reg &= ~(BIT(1) | BIT(0));
+		dss_reg_w(&rsc->wrapper_io, SDE_RSCC_WRAPPER_OVERRIDE_CTRL,
+							reg, rsc->debug_mode);
+	}
+
 	// needs review with HPG sequence
 	dss_reg_w(&rsc->wrapper_io, SDE_RSCC_F1_QTMR_V1_CNTP_CVAL_LO,
 					0x0, rsc->debug_mode);
@@ -405,7 +439,7 @@
 	int reg;
 
 	if (rsc->power_collapse) {
-		rc = sde_rsc_mode2_exit(rsc);
+		rc = sde_rsc_mode2_exit(rsc, state);
 		if (rc)
 			pr_err("power collapse: mode2 exit failed\n");
 		else
@@ -451,6 +485,10 @@
 		rsc_event_trigger(rsc, SDE_RSC_EVENT_SOLVER_DISABLED);
 		break;
 
+	case SDE_RSC_CLK_STATE:
+		pr_debug("clk state handling\n");
+		break;
+
 	case SDE_RSC_IDLE_STATE:
 		rc = sde_rsc_mode2_entry(rsc);
 		if (rc)
@@ -694,9 +732,6 @@
 	rsc->hw_ops.tcs_use_ok = rsc_hw_tcs_use_ok;
 	rsc->hw_ops.is_amc_mode = rsc_hw_is_amc_mode;
 
-	rsc->hw_ops.mode2_entry = sde_rsc_mode2_entry;
-	rsc->hw_ops.mode2_exit = sde_rsc_mode2_exit;
-
 	rsc->hw_ops.hw_vsync = rsc_hw_vsync;
 	rsc->hw_ops.state_update = sde_rsc_state_update;
 	rsc->hw_ops.debug_show = sde_rsc_debug_show;
diff --git a/drivers/gpu/drm/msm/sde_rsc_priv.h b/drivers/gpu/drm/msm/sde_rsc_priv.h
index 2563c85..30810fe 100644
--- a/drivers/gpu/drm/msm/sde_rsc_priv.h
+++ b/drivers/gpu/drm/msm/sde_rsc_priv.h
@@ -61,10 +61,6 @@
  *				TCS command.
  * @hw_vsync:			Enables the vsync on RSC block.
  * @tcs_use_ok:			set TCS set to high to allow RSC to use it.
- * @mode2_entry:		Request to entry mode2 when all clients are
- *                              requesting power collapse.
- * @mode2_exit:			Request to exit mode2 when one of the client
- *                              is requesting against the power collapse
  * @is_amc_mode:		Check current amc mode status
  * @state_update:		Enable/override the solver based on rsc state
  *                              status (command/video)
@@ -78,8 +74,6 @@
 	int (*hw_vsync)(struct sde_rsc_priv *rsc, enum rsc_vsync_req request,
 		char *buffer, int buffer_size, u32 mode);
 	int (*tcs_use_ok)(struct sde_rsc_priv *rsc);
-	int (*mode2_entry)(struct sde_rsc_priv *rsc);
-	int (*mode2_exit)(struct sde_rsc_priv *rsc);
 	bool (*is_amc_mode)(struct sde_rsc_priv *rsc);
 	int (*state_update)(struct sde_rsc_priv *rsc, enum sde_rsc_state state);
 	int (*debug_show)(struct seq_file *s, struct sde_rsc_priv *rsc);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3de5e6e..4ce04e0 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -213,8 +213,8 @@
 			rbo->placement.num_busy_placement = 0;
 			for (i = 0; i < rbo->placement.num_placement; i++) {
 				if (rbo->placements[i].flags & TTM_PL_FLAG_VRAM) {
-					if (rbo->placements[0].fpfn < fpfn)
-						rbo->placements[0].fpfn = fpfn;
+					if (rbo->placements[i].fpfn < fpfn)
+						rbo->placements[i].fpfn = fpfn;
 				} else {
 					rbo->placement.busy_placement =
 						&rbo->placements[i];
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7aadce1..c7e6c98 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -842,6 +842,17 @@
 	drm_atomic_helper_crtc_destroy_state(crtc, state);
 }
 
+static void
+vc4_crtc_reset(struct drm_crtc *crtc)
+{
+	if (crtc->state)
+		__drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+	crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
+	if (crtc->state)
+		crtc->state->crtc = crtc;
+}
+
 static const struct drm_crtc_funcs vc4_crtc_funcs = {
 	.set_config = drm_atomic_helper_set_config,
 	.destroy = vc4_crtc_destroy,
@@ -849,7 +860,7 @@
 	.set_property = NULL,
 	.cursor_set = NULL, /* handled by drm_mode_cursor_universal */
 	.cursor_move = NULL, /* handled by drm_mode_cursor_universal */
-	.reset = drm_atomic_helper_crtc_reset,
+	.reset = vc4_crtc_reset,
 	.atomic_duplicate_state = vc4_crtc_duplicate_state,
 	.atomic_destroy_state = vc4_crtc_destroy_state,
 	.gamma_set = vc4_crtc_gamma_set,
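
vc4_crtc_reset() above exists because the driver subclasses
drm_crtc_state; the generic drm_atomic_helper_crtc_reset() would allocate
only the base structure, and the first duplicate_state would then read
past it. A kernel-style sketch of the pattern, assuming the usual
base-state-first layout (names are illustrative, not the vc4 code
verbatim):

	#include <drm/drm_atomic_helper.h>
	#include <linux/slab.h>

	struct my_crtc_state {
		struct drm_crtc_state base;	/* must be the first member */
		u32 my_private_field;
	};

	static void my_crtc_reset(struct drm_crtc *crtc)
	{
		if (crtc->state)
			__drm_atomic_helper_crtc_destroy_state(crtc->state);

		/* allocate the full subclass so later casts are safe */
		crtc->state = kzalloc(sizeof(struct my_crtc_state), GFP_KERNEL);
		if (crtc->state)
			crtc->state->crtc = crtc;
	}
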
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 2709aca..fddfb2c 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -537,6 +537,8 @@
 #define A6XX_UCHE_GMEM_RANGE_MAX_HI         0xE0E
 #define A6XX_UCHE_CACHE_WAYS                0xE17
 #define A6XX_UCHE_FILTER_CNTL               0xE18
+#define A6XX_UCHE_CLIENT_PF                 0xE19
+#define A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK  0x7
 #define A6XX_UCHE_PERFCTR_UCHE_SEL_0        0xE1C
 #define A6XX_UCHE_PERFCTR_UCHE_SEL_1        0xE1D
 #define A6XX_UCHE_PERFCTR_UCHE_SEL_2        0xE1E
@@ -685,6 +687,7 @@
 #define A6XX_GMU_RPMH_CTRL			0x1F8E8
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
 #define A6XX_GMU_RPMH_POWER_STATE		0x1F8EC
+#define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
 
 /* HFI registers*/
 #define A6XX_GMU_ALWAYS_ON_COUNTER_L		0x1F888
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index e23d6a0..75d5587 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -856,6 +856,8 @@
 				unsigned int arg1, unsigned int arg2);
 	bool (*hw_isidle)(struct adreno_device *);
 	int (*wait_for_gmu_idle)(struct adreno_device *);
+	const char *(*iommu_fault_block)(struct adreno_device *adreno_dev,
+				unsigned int fsynr1);
 };
 
 /**
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 54d4bf7..d278389 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -285,6 +285,8 @@
 	kgsl_regwrite(device, A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
 					  (1 << 30) | 0x4000);
 
+	kgsl_regwrite(device, A6XX_UCHE_CLIENT_PF, 1);
+
 	/* Set TWOPASSUSEWFI in A6XX_PC_DBG_ECO_CNTL if requested */
 	if (ADRENO_QUIRK(adreno_dev, ADRENO_QUIRK_TWO_PASS_USE_WFI))
 		kgsl_regrmw(device, A6XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
@@ -646,38 +648,40 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = &device->gmu;
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC)) {
-		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
-			0x000A0080);
-		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
-			SPTP_ENABLE_MASK);
-		gmu->idle_level = GPU_HW_SPTP_PC;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC)) {
-		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
-			0x000A0080);
-		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
-			IFPC_ENABLE_MASK);
-		gmu->idle_level = GPU_HW_IFPC;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP)) {
-		_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL,
-			HW_NAP_ENABLE_MASK);
-		gmu->idle_level = GPU_HW_NAP;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT)) {
+	/* Configure registers for idle setting. The setting is cumulative */
+	switch (gmu->idle_level) {
+	case GPU_HW_MIN_VOLT:
 		_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, MIN_BW_ENABLE_MASK);
 		_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, MIN_BW_HYST);
-		gmu->idle_level = GPU_HW_MIN_VOLT;
+		/* fall through */
+	case GPU_HW_NAP:
+		_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, HW_NAP_ENABLE_MASK);
+		/* fall through */
+	case GPU_HW_IFPC:
+		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+				0x000A0080);
+		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
+				IFPC_ENABLE_MASK);
+		/* fall through */
+	case GPU_HW_SPTP_PC:
+		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+				0x000A0080);
+		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
+				SPTP_ENABLE_MASK);
+		/* fall through */
+	default:
+		break;
 	}
 
+	/* ACD feature enablement via the LM handshake register */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, BIT(10));
+
 	/* Enable RPMh GPU client */
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
 		_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, RPMH_ENABLE_MASK);
 
+	/* Disable reference bandgap voltage */
 	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
 }
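
The switch above relies on deliberate fall-through: configuring the
deepest requested idle level also enables every shallower one, so each
case sets up its own feature and drops into the next. A standalone sketch
of the technique:

	#include <stdio.h>

	enum idle_level { ACTIVE, SPTP_PC, IFPC, NAP, MIN_VOLT };

	static void configure(enum idle_level level)
	{
		switch (level) {
		case MIN_VOLT:
			printf("enable min-volt\n");
			/* fall through */
		case NAP:
			printf("enable hw nap\n");
			/* fall through */
		case IFPC:
			printf("enable ifpc\n");
			/* fall through */
		case SPTP_PC:
			printf("enable sptp pc\n");
			/* fall through */
		default:
			break;
		}
	}

	int main(void)
	{
		configure(IFPC);	/* prints "enable ifpc" then "enable sptp pc" */
		return 0;
	}
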
 
@@ -1127,8 +1131,6 @@
 	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
 	int ret, i;
 
-	a6xx_gmu_power_config(device);
-
 	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
 		/* Turn on the HM and SPTP head switches */
 		ret = a6xx_hm_sptprac_control(device, true);
@@ -1173,6 +1175,8 @@
 	kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
 			FENCE_RANGE_MASK);
 
+	/* Configure power control and bring the GMU out of reset */
+	a6xx_gmu_power_config(device);
 	ret = a6xx_gmu_start(device);
 	if (ret)
 		return ret;
@@ -1522,6 +1526,46 @@
 	iounmap(gpu_cx_reg);
 }
 
+static const char *fault_block[8] = {
+	[0] = "CP",
+	[1] = "UCHE",
+	[2] = "VFD",
+	[3] = "UCHE",
+	[4] = "CCU",
+	[5] = "unknown",
+	[6] = "CDP Prefetch",
+	[7] = "GPMU",
+};
+
+static const char *uche_client[8] = {
+	[0] = "VFD",
+	[1] = "SP",
+	[2] = "VSC",
+	[3] = "VPC",
+	[4] = "HLSQ",
+	[5] = "PC",
+	[6] = "LRZ",
+	[7] = "unknown",
+};
+
+static const char *a6xx_iommu_fault_block(struct adreno_device *adreno_dev,
+						unsigned int fsynr1)
+{
+	struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+	unsigned int client_id;
+	unsigned int uche_client_id;
+
+	client_id = fsynr1 & 0xff;
+
+	if (client_id >= ARRAY_SIZE(fault_block))
+		return "unknown";
+	else if (client_id != 3)
+		return fault_block[client_id];
+
+	kgsl_regread(device, A6XX_UCHE_CLIENT_PF, &uche_client_id);
+	return uche_client[uche_client_id & A6XX_UCHE_CLIENT_PF_CLIENT_ID_MASK];
+}
+
 #define A6XX_INT_MASK \
 	((1 << A6XX_INT_CP_AHB_ERROR) |			\
 	 (1 << A6XX_INT_ATB_ASYNCFIFO_OVERFLOW) |	\
@@ -2078,5 +2122,6 @@
 	.oob_clear = a6xx_oob_clear,
 	.rpmh_gpu_pwrctrl = a6xx_rpmh_gpu_pwrctrl,
 	.hw_isidle = a6xx_hw_isidle, /* Replaced by NULL if GMU is disabled */
-	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle
+	.wait_for_gmu_idle = a6xx_wait_for_gmu_idle,
+	.iommu_fault_block = a6xx_iommu_fault_block,
 };
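
a6xx_iommu_fault_block() above decodes the faulting client from the low
byte of FSYNR1, with a secondary register read when the client is the
UCHE arbiter (id 3). A standalone sketch of the table decode, leaving out
the UCHE sub-lookup:

	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static const char *const fault_block[] = {
		"CP", "UCHE", "VFD", "UCHE", "CCU", "unknown", "CDP Prefetch", "GPMU",
	};

	static const char *decode(unsigned int fsynr1)
	{
		unsigned int client_id = fsynr1 & 0xff;

		if (client_id >= ARRAY_SIZE(fault_block))
			return "unknown";
		return fault_block[client_id];
	}

	int main(void)
	{
		printf("%s\n", decode(0x02));	/* "VFD" */
		return 0;
	}
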
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index d1c84f1..2283096 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -261,8 +261,11 @@
 {
 	struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
 
-	if (entry != NULL)
+	if (entry != NULL) {
 		kref_init(&entry->refcount);
+		/* the extra ref is put by the userspace alloc and map ioctls */
+		kref_get(&entry->refcount);
+	}
 
 	return entry;
 }
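
The hunk above makes kgsl_mem_entry_create() hand back two references:
one owned by the lookup structures and one owned by the creating ioctl,
which the later hunks drop explicitly once the entry is published. A
kernel-style sketch of the pattern, not the kgsl code itself:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct obj {
		struct kref refcount;
	};

	static void obj_release(struct kref *kref)
	{
		kfree(container_of(kref, struct obj, refcount));
	}

	static struct obj *obj_create(void)
	{
		struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

		if (o) {
			kref_init(&o->refcount);	/* ref for the lookup table */
			kref_get(&o->refcount);		/* ref owned by the creator */
		}
		return o;
	}

	/* the creating path ends with kref_put(&o->refcount, obj_release),
	 * so the object cannot disappear while the ioctl is still using it.
	 */
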
@@ -1775,9 +1778,9 @@
 	/* Commit the pointer to the context in context_idr */
 	write_lock(&device->context_lock);
 	idr_replace(&device->context_idr, context, context->id);
+	param->drawctxt_id = context->id;
 	write_unlock(&device->context_lock);
 
-	param->drawctxt_id = context->id;
 done:
 	return result;
 }
@@ -2412,6 +2415,10 @@
 	trace_kgsl_mem_map(entry, fd);
 
 	kgsl_mem_entry_commit_process(entry);
+
+	/* Put the extra ref from kgsl_mem_entry_create() */
+	kgsl_mem_entry_put(entry);
+
 	return 0;
 
 unmap:
@@ -2718,6 +2725,10 @@
 	trace_kgsl_mem_map(entry, param->fd);
 
 	kgsl_mem_entry_commit_process(entry);
+
+	/* Put the extra ref from kgsl_mem_entry_create() */
+	kgsl_mem_entry_put(entry);
+
 	return result;
 
 error_attach:
@@ -3155,6 +3166,9 @@
 	param->mmapsize = kgsl_memdesc_footprint(&entry->memdesc);
 	param->id = entry->id;
 
+	/* Put the extra ref from kgsl_mem_entry_create() */
+	kgsl_mem_entry_put(entry);
+
 	return 0;
 }
 
@@ -3178,6 +3192,9 @@
 	param->size = (size_t) entry->memdesc.size;
 	param->flags = (unsigned int) entry->memdesc.flags;
 
+	/* Put the extra ref from kgsl_mem_entry_create() */
+	kgsl_mem_entry_put(entry);
+
 	return 0;
 }
 
@@ -3201,6 +3218,9 @@
 	param->mmapsize = (size_t) kgsl_memdesc_footprint(&entry->memdesc);
 	param->gpuaddr = (unsigned long) entry->memdesc.gpuaddr;
 
+	/* Put the extra ref from kgsl_mem_entry_create() */
+	kgsl_mem_entry_put(entry);
+
 	return 0;
 }
 
@@ -3318,6 +3338,9 @@
 	trace_sparse_phys_alloc(entry->id, param->size, param->pagesize);
 	kgsl_mem_entry_commit_process(entry);
 
+	/* Put the extra ref from kgsl_mem_entry_create() */
+	kgsl_mem_entry_put(entry);
+
 	return 0;
 
 err_invalid_pages:
@@ -3397,6 +3420,9 @@
 	trace_sparse_virt_alloc(entry->id, param->size, param->pagesize);
 	kgsl_mem_entry_commit_process(entry);
 
+	/* Put the extra ref from kgsl_mem_entry_create() */
+	kgsl_mem_entry_put(entry);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 2e9f108..97e4b6f 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -986,6 +986,7 @@
 	struct gmu_memdesc *mem_addr = NULL;
 	struct kgsl_hfi *hfi = &gmu->hfi;
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int i = 0, ret = -ENXIO;
 
 	node = of_find_compatible_node(device->pdev->dev.of_node,
@@ -1086,7 +1087,17 @@
 
 	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
 
-	gmu->idle_level = GPU_HW_ACTIVE;
+	/* Set up GMU idle states */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
+		gmu->idle_level = GPU_HW_MIN_VOLT;
+	else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
+		gmu->idle_level = GPU_HW_NAP;
+	else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
+		gmu->idle_level = GPU_HW_IFPC;
+	else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
+		gmu->idle_level = GPU_HW_SPTP_PC;
+	else
+		gmu->idle_level = GPU_HW_ACTIVE;
 
 	return 0;
 
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 0325db8..86d4d61 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -797,6 +797,7 @@
 	int write;
 	struct kgsl_device *device;
 	struct adreno_device *adreno_dev;
+	struct adreno_gpudev *gpudev;
 	unsigned int no_page_fault_log = 0;
 	unsigned int curr_context_id = 0;
 	struct kgsl_context *context;
@@ -813,6 +814,7 @@
 	ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
 	device = KGSL_MMU_DEVICE(mmu);
 	adreno_dev = ADRENO_DEVICE(device);
+	gpudev = ADRENO_GPU_DEVICE(adreno_dev);
 
 	if (pt->name == KGSL_MMU_SECURE_PT)
 		ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
@@ -886,6 +888,16 @@
 			ctx->name, ptbase, contextidr,
 			write ? "write" : "read", fault_type);
 
+		if (gpudev->iommu_fault_block) {
+			unsigned int fsynr1;
+
+			fsynr1 = KGSL_IOMMU_GET_CTX_REG(ctx, FSYNR1);
+			KGSL_MEM_CRIT(ctx->kgsldev,
+				"FAULTING BLOCK: %s\n",
+				gpudev->iommu_fault_block(adreno_dev,
+								fsynr1));
+		}
+
 		/* Don't print the debug if this is a permissions fault */
 		if (!(flags & IOMMU_FAULT_PERMISSION)) {
 			_check_if_freed(ctx, addr, ptname);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 5e7a564..0c535d0 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -2017,6 +2017,14 @@
 
 	wacom_update_name(wacom, wireless ? " (WL)" : "");
 
+	/* pen-only Bamboo devices support neither touch nor pad */
+	if ((features->type == BAMBOO_PEN) &&
+	    ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
+	    (features->device_type & WACOM_DEVICETYPE_PAD))) {
+		error = -ENODEV;
+		goto fail;
+	}
+
 	error = wacom_add_shared_data(hdev);
 	if (error)
 		goto fail;
@@ -2064,14 +2072,6 @@
 		goto fail_quirks;
 	}
 
-	/* pen only Bamboo neither support touch nor pad */
-	if ((features->type == BAMBOO_PEN) &&
-	    ((features->device_type & WACOM_DEVICETYPE_TOUCH) ||
-	    (features->device_type & WACOM_DEVICETYPE_PAD))) {
-		error = -ENODEV;
-		goto fail_quirks;
-	}
-
 	if (features->device_type & WACOM_DEVICETYPE_WL_MONITOR)
 		error = hid_hw_open(hdev);
 
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 3a4474d..94b2e2f9 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -578,27 +578,29 @@
 }
 EXPORT_SYMBOL_GPL(coresight_enable);
 
-void coresight_disable(struct coresight_device *csdev)
+static void __coresight_disable(struct coresight_device *csdev)
 {
 	int  ret;
 
-	mutex_lock(&coresight_mutex);
-
 	ret = coresight_validate_source(csdev, __func__);
 	if (ret)
-		goto out;
+		return;
 
 	if (!csdev->enable)
-		goto out;
+		return;
 
 	if (csdev->node == NULL)
-		goto out;
+		return;
 
 	coresight_disable_source(csdev);
 	coresight_disable_path(csdev->node->path);
 	coresight_release_path(csdev, csdev->node->path);
+}
 
-out:
+void coresight_disable(struct coresight_device *csdev)
+{
+	mutex_lock(&coresight_mutex);
+	__coresight_disable(csdev);
 	mutex_unlock(&coresight_mutex);
 }
 EXPORT_SYMBOL_GPL(coresight_disable);
@@ -904,7 +906,7 @@
 		csdev = coresight_get_source(cspath->path);
 		if (!csdev)
 			continue;
-		coresight_disable(csdev);
+		__coresight_disable(csdev);
 	}
 
 	mutex_unlock(&coresight_mutex);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index e412230..b521df6 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -38,6 +38,7 @@
 #define FG_ADC_RR_FAKE_BATT_HIGH_MSB		0x5B
 
 #define FG_ADC_RR_BATT_ID_CTRL			0x60
+#define FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV	BIT(0)
 #define FG_ADC_RR_BATT_ID_TRIGGER		0x61
 #define FG_ADC_RR_BATT_ID_TRIGGER_CTL		BIT(0)
 #define FG_ADC_RR_BATT_ID_STS			0x62
@@ -748,6 +749,75 @@
 	return rc;
 }
 
+static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV);
+		if (rc < 0) {
+			pr_err("Enabling BATT ID channel failed:%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV, 0);
+		if (rc < 0) {
+			pr_err("Disabling BATT ID channel failed:%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
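+/*
+ * BATT_ID conversion: enable the channel, assert the trigger, read the
+ * result in continuous mode, then clear the trigger and disable the
+ * channel again even if an earlier step failed.
+ */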
+static int rradc_do_batt_id_conversion(struct rradc_chip *chip,
+		struct rradc_chan_prop *prop, u16 *data, u8 *buf)
+{
+	int rc = 0, ret = 0;
+
+	rc = rradc_enable_batt_id_channel(chip, true);
+	if (rc < 0) {
+		pr_err("Enabling BATT ID channel failed:%d\n", rc);
+		return rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+	if (rc < 0) {
+		pr_err("BATT_ID trigger set failed:%d\n", rc);
+		ret = rc;
+		rc = rradc_enable_batt_id_channel(chip, false);
+		if (rc < 0)
+			pr_err("Disabling BATT ID channel failed:%d\n", rc);
+		return ret;
+	}
+
+	rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+	if (rc < 0) {
+		pr_err("Error reading in continuous mode:%d\n", rc);
+		ret = rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+			FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
+	if (rc < 0) {
+		pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+		ret = rc;
+	}
+
+	rc = rradc_enable_batt_id_channel(chip, false);
+	if (rc < 0) {
+		pr_err("Disabling BATT ID channel failed:%d\n", rc);
+		ret = rc;
+	}
+
+	return ret;
+}
+
 static int rradc_do_conversion(struct rradc_chip *chip,
 			struct rradc_chan_prop *prop, u16 *data)
 {
@@ -760,24 +830,9 @@
 
 	switch (prop->channel) {
 	case RR_ADC_BATT_ID:
-		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
-				FG_ADC_RR_BATT_ID_TRIGGER_CTL,
-				FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+		rc = rradc_do_batt_id_conversion(chip, prop, data, buf);
 		if (rc < 0) {
-			pr_err("BATT_ID trigger set failed:%d\n", rc);
-			goto fail;
-		}
-
-		rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
-		if (rc < 0) {
-			pr_err("Error reading in continuous mode:%d\n", rc);
-			goto fail;
-		}
-
-		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
-				FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
-		if (rc < 0) {
-			pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+			pr_err("Battery ID conversion failed:%d\n", rc);
 			goto fail;
 		}
 		break;
diff --git a/drivers/iio/adc/qcom-tadc.c b/drivers/iio/adc/qcom-tadc.c
index 9241288..05b1985 100644
--- a/drivers/iio/adc/qcom-tadc.c
+++ b/drivers/iio/adc/qcom-tadc.c
@@ -18,7 +18,12 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/pmic-voter.h>
 
+#define USB_PRESENT_VOTER			"USB_PRESENT_VOTER"
+#define SLEEP_VOTER				"SLEEP_VOTER"
+#define SHUTDOWN_VOTER				"SHUTDOWN_VOTER"
 #define TADC_REVISION1_REG			0x00
 #define TADC_REVISION2_REG			0x01
 #define TADC_REVISION3_REG			0x02
@@ -54,6 +59,7 @@
 #define TADC_CH7_ADC_HI_REG(chip)		(chip->tadc_base + 0x73)
 #define TADC_CH8_ADC_LO_REG(chip)		(chip->tadc_base + 0x74)
 #define TADC_CH8_ADC_HI_REG(chip)		(chip->tadc_base + 0x75)
+#define TADC_ADC_DIRECT_TST(chip)		(chip->tadc_base + 0xE7)
 
 /* TADC_CMP register definitions */
 #define TADC_CMP_THR1_CMP_REG(chip)		(chip->tadc_cmp_base + 0x51)
@@ -217,6 +223,12 @@
 	struct tadc_chan_data	chans[TADC_NUM_CH];
 	struct completion	eoc_complete;
 	struct mutex		write_lock;
+	struct mutex		conv_lock;
+	struct power_supply	*usb_psy;
+	struct votable		*tadc_disable_votable;
+	struct work_struct	status_change_work;
+	struct notifier_block	nb;
+	u8			hwtrig_conv;
 };
 
 struct tadc_pt {
@@ -274,7 +286,7 @@
 	if ((reg & 0xFF00) == chip->tadc_cmp_base)
 		return true;
 
-	if (reg == TADC_HWTRIG_CONV_CH_EN_REG(chip))
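+	/*
+	 * Registers at or above the HW trigger enable register,
+	 * including TADC_ADC_DIRECT_TST (0xE7), are also secured.
+	 */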
+	if (reg >= TADC_HWTRIG_CONV_CH_EN_REG(chip))
 		return true;
 
 	return false;
@@ -345,6 +357,26 @@
 	return rc;
 }
 
+static int tadc_masked_write(struct tadc_chip *chip, u16 reg, u8 mask, u8 data)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->write_lock);
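+	/* secure registers need an unlock write (key 0xA5) before updates */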
+	if (tadc_is_reg_locked(chip, reg)) {
+		rc = regmap_write(chip->regmap, (reg & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			pr_err("Couldn't unlock secure register rc=%d\n", rc);
+			goto unlock;
+		}
+	}
+
+	rc = regmap_update_bits(chip->regmap, reg, mask, data);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
 static int tadc_lerp(const struct tadc_pt *pts, size_t size, bool inv,
 							s32 input, s32 *output)
 {
@@ -480,12 +512,22 @@
 {
 	unsigned long timeout, timeleft;
 	u8 val[TADC_NUM_CH * 2];
-	int rc, i;
+	int rc = 0, i;
 
+	mutex_lock(&chip->conv_lock);
 	rc = tadc_read(chip, TADC_MBG_ERR_REG(chip), val, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read mbg error status rc=%d\n", rc);
-		return rc;
+		goto unlock;
+	}
+
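+	/* re-arm the EOC completion; the disable path waits on it */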
+	reinit_completion(&chip->eoc_complete);
+
+	if (get_effective_result(chip->tadc_disable_votable)) {
+		/* leave the completion in the completed state */
+		complete_all(&chip->eoc_complete);
+		rc = -ENODATA;
+		goto unlock;
 	}
 
 	if (val[0] != 0) {
@@ -496,7 +538,7 @@
 	rc = tadc_write(chip, TADC_CONV_REQ_REG(chip), channels);
 	if (rc < 0) {
 		pr_err("Couldn't write conversion request rc=%d\n", rc);
-		return rc;
+		goto unlock;
 	}
 
 	timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
@@ -506,25 +548,34 @@
 		rc = tadc_read(chip, TADC_SW_CH_CONV_REG(chip), val, 1);
 		if (rc < 0) {
 			pr_err("Couldn't read conversion status rc=%d\n", rc);
-			return rc;
+			goto unlock;
 		}
 
+		/*
+		 * check one last time if the channel we are requesting
+		 * has completed conversion
+		 */
 		if (val[0] != channels) {
-			pr_err("Conversion timed out\n");
-			return -ETIMEDOUT;
+			rc = -ETIMEDOUT;
+			goto unlock;
 		}
 	}
 
 	rc = tadc_read(chip, TADC_CH1_ADC_LO_REG(chip), val, ARRAY_SIZE(val));
 	if (rc < 0) {
 		pr_err("Couldn't read adc channels rc=%d\n", rc);
-		return rc;
+		goto unlock;
 	}
 
 	for (i = 0; i < TADC_NUM_CH; i++)
 		adc[i] = (s16)(val[i * 2] | (u16)val[i * 2 + 1] << 8);
 
-	return jiffies_to_msecs(timeout - timeleft);
+	pr_debug("Conversion time for channels 0x%x = %dms\n", channels,
+			jiffies_to_msecs(timeout - timeleft));
+
+unlock:
+	mutex_unlock(&chip->conv_lock);
+	return rc;
 }
 
 static int tadc_read_raw(struct iio_dev *indio_dev,
@@ -593,12 +644,17 @@
 			break;
 		default:
 			rc = tadc_do_conversion(chip, BIT(chan->channel), adc);
-			if (rc >= 0)
-				*val = adc[chan->channel];
+			if (rc < 0) {
+				if (rc != -ENODATA)
+					pr_err("Couldn't read battery current and voltage channels rc=%d\n",
+									rc);
+				return rc;
+			}
+			*val = adc[chan->channel];
 			break;
 		}
 
-		if (rc < 0) {
+		if (rc < 0 && rc != -ENODATA) {
 			pr_err("Couldn't read channel %d\n", chan->channel);
 			return rc;
 		}
@@ -630,7 +686,7 @@
 		case TADC_BATT_P:
 			rc = tadc_do_conversion(chip,
 				BIT(TADC_BATT_I) | BIT(TADC_BATT_V), adc);
-			if (rc < 0) {
+			if (rc < 0 && rc != -ENODATA) {
 				pr_err("Couldn't read battery current and voltage channels rc=%d\n",
 									rc);
 				return rc;
@@ -641,7 +697,7 @@
 		case TADC_INPUT_P:
 			rc = tadc_do_conversion(chip,
 				BIT(TADC_INPUT_I) | BIT(TADC_INPUT_V), adc);
-			if (rc < 0) {
+			if (rc < 0 && rc != -ENODATA) {
 				pr_err("Couldn't read input current and voltage channels rc=%d\n",
 									rc);
 				return rc;
@@ -683,6 +739,7 @@
 		case TADC_DIE_TEMP:
 		case TADC_DIE_TEMP_THR1:
 		case TADC_DIE_TEMP_THR2:
+		case TADC_DIE_TEMP_THR3:
 			*val = chan_data->scale;
 			return IIO_VAL_INT;
 		case TADC_BATT_I:
@@ -821,15 +878,137 @@
 	return 0;
 }
 
-
 static irqreturn_t handle_eoc(int irq, void *dev_id)
 {
 	struct tadc_chip *chip = dev_id;
 
-	complete(&chip->eoc_complete);
+	complete_all(&chip->eoc_complete);
 	return IRQ_HANDLED;
 }
 
+static int tadc_disable_vote_callback(struct votable *votable,
+			void *data, int disable, const char *client)
+{
+	struct tadc_chip *chip = data;
+	int rc;
+	int timeout;
+	unsigned long timeleft;
+
+	if (disable) {
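+		/* let any in-flight conversion finish before disabling */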
+		timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
+		timeleft = wait_for_completion_timeout(&chip->eoc_complete,
+				timeout);
+		if (timeleft == 0)
+			pr_err("Timed out waiting for eoc, disabling hw conversions regardless\n");
+
+		rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							&chip->hwtrig_conv, 1);
+		if (rc < 0) {
+			pr_err("Couldn't save hw conversions rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), 0x00);
+		if (rc < 0) {
+			pr_err("Couldn't disable hw conversions rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x80);
+		if (rc < 0) {
+			pr_err("Couldn't enable direct test mode rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x00);
+		if (rc < 0) {
+			pr_err("Couldn't disable direct test mode rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							chip->hwtrig_conv);
+		if (rc < 0) {
+			pr_err("Couldn't restore hw conversions rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	pr_debug("client: %s disable: %d\n", client, disable);
+	return 0;
+}
+
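+/* re-evaluate the USB presence vote whenever the usb psy changes */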
+static void status_change_work(struct work_struct *work)
+{
+	struct tadc_chip *chip = container_of(work,
+			struct tadc_chip, status_change_work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (!chip->usb_psy) {
+		/* treat USB as not present */
+		vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+		return;
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+		       POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get present status rc=%d\n", rc);
+		/* treat usb is not present */
+		vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+		return;
+	}
+
+	/* disable if USB is not present */
+	vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, !pval.intval, 0);
+}
+
+static int tadc_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct tadc_chip *chip = container_of(nb, struct tadc_chip, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if (strcmp(psy->desc->name, "usb") == 0)
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static int tadc_register_notifier(struct tadc_chip *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = tadc_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
+static int tadc_suspend(struct device *dev)
+{
+	struct tadc_chip *chip = dev_get_drvdata(dev);
+
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, true, 0);
+	return 0;
+}
+
+static int tadc_resume(struct device *dev)
+{
+	struct tadc_chip *chip = dev_get_drvdata(dev);
+
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+	return 0;
+}
+
 static int tadc_set_therm_table(struct tadc_chan_data *chan_data, u32 beta,
 				u32 rtherm)
 {
@@ -975,16 +1154,23 @@
 		return rc;
 	}
 
-	/* enable all temperature hardware triggers */
-	rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
-							BIT(TADC_THERM1) |
-							BIT(TADC_THERM2) |
-							BIT(TADC_DIE_TEMP));
+	/* enable connector and die temp hardware triggers */
+	rc = tadc_masked_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+					BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP),
+					BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP));
 	if (rc < 0) {
 		pr_err("Couldn't enable hardware triggers rc=%d\n", rc);
 		return rc;
 	}
 
+	/* save hw triggered conversion configuration */
+	rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							&chip->hwtrig_conv, 1);
+	if (rc < 0) {
+		pr_err("Couldn't save hw conversions rc=%d\n", rc);
+		return rc;
+	}
+
 	return 0;
 }
 
@@ -1009,6 +1195,12 @@
 	chip->dev = &pdev->dev;
 	init_completion(&chip->eoc_complete);
 
+	/*
+	 * set the completion to the "completed" state so that disabling
+	 * the tadc can make progress
+	 */
+	complete_all(&chip->eoc_complete);
+
 	rc = of_property_read_u32(node, "reg", &chip->tadc_base);
 	if (rc < 0) {
 		pr_err("Couldn't read base address rc=%d\n", rc);
@@ -1017,6 +1209,8 @@
 	chip->tadc_cmp_base = chip->tadc_base + 0x100;
 
 	mutex_init(&chip->write_lock);
+	mutex_init(&chip->conv_lock);
+	INIT_WORK(&chip->status_change_work, status_change_work);
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
 	if (!chip->regmap) {
 		pr_err("Couldn't get regmap\n");
@@ -1035,17 +1229,36 @@
 		return rc;
 	}
 
+	chip->tadc_disable_votable = create_votable("SMB_TADC_DISABLE",
+					VOTE_SET_ANY,
+					tadc_disable_vote_callback,
+					chip);
+	if (IS_ERR(chip->tadc_disable_votable)) {
+		rc = PTR_ERR(chip->tadc_disable_votable);
+		return rc;
+	}
+	/* assume USB is not present until the notifier reports it */
+	vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+	vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, false, 0);
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+
+	rc = tadc_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register notifier=%d\n", rc);
+		goto destroy_votable;
+	}
+
 	irq = of_irq_get_byname(node, "eoc");
 	if (irq < 0) {
 		pr_err("Couldn't get eoc irq rc=%d\n", irq);
-		return irq;
+		goto destroy_votable;
 	}
 
 	rc = devm_request_threaded_irq(chip->dev, irq, NULL, handle_eoc,
 						IRQF_ONESHOT, "eoc", chip);
 	if (rc < 0) {
 		pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
-		return rc;
+		goto destroy_votable;
 	}
 
 	indio_dev->dev.parent = chip->dev;
@@ -1058,17 +1271,37 @@
 	rc = devm_iio_device_register(chip->dev, indio_dev);
 	if (rc < 0) {
 		pr_err("Couldn't register IIO device rc=%d\n", rc);
-		return rc;
+		goto destroy_votable;
 	}
 
+	platform_set_drvdata(pdev, chip);
 	return 0;
+
+destroy_votable:
+	destroy_votable(chip->tadc_disable_votable);
+	return rc;
 }
 
 static int tadc_remove(struct platform_device *pdev)
 {
+	struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+	destroy_votable(chip->tadc_disable_votable);
 	return 0;
 }
 
+static void tadc_shutdown(struct platform_device *pdev)
+{
+	struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+	vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, true, 0);
+}
+
+static const struct dev_pm_ops tadc_pm_ops = {
+	.resume		= tadc_resume,
+	.suspend	= tadc_suspend,
+};
+
 static const struct of_device_id tadc_match_table[] = {
 	{ .compatible = "qcom,tadc" },
 	{ }
@@ -1076,12 +1309,14 @@
 MODULE_DEVICE_TABLE(of, tadc_match_table);
 
 static struct platform_driver tadc_driver = {
-	.driver	= {
+	.driver		= {
 		.name		= "qcom-tadc",
 		.of_match_table	= tadc_match_table,
+		.pm		= &tadc_pm_ops,
 	},
-	.probe	= tadc_probe,
-	.remove	= tadc_remove,
+	.probe		= tadc_probe,
+	.remove		= tadc_remove,
+	.shutdown	= tadc_shutdown,
 };
 module_platform_driver(tadc_driver);
 
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index 34c7381..aded314 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -17,7 +17,8 @@
 #include <linux/vmalloc.h>
 #include <asm/cacheflush.h>
 #include <asm/dma-iommu.h>
-
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 /* some redundant definitions... :( TODO: move to io-pgtable-fast.h */
 #define FAST_PAGE_SHIFT		12
@@ -633,7 +634,7 @@
 	dev_err(fast->dev, "Mapped over stale tlb at %pa\n", &iova);
 	dev_err(fast->dev, "bitmap (failure at idx %lu):\n", bitmap_idx);
 	dev_err(fast->dev, "ptep: %p pmds: %p diff: %lu\n", ptep,
-		fast->pgtbl_pmds, ptep - fast->pgtbl_pmds);
+		fast->pgtbl_pmds, bitmap_idx);
 	print_hex_dump(KERN_ERR, "bmap: ", DUMP_PREFIX_ADDRESS,
 		       32, 8, fast->bitmap, fast->bitmap_size, false);
 }
@@ -683,7 +684,7 @@
  * fast_smmu_attach_device function.
  */
 static struct dma_fast_smmu_mapping *__fast_smmu_create_mapping_sized(
-	dma_addr_t base, size_t size)
+	dma_addr_t base, u64 size)
 {
 	struct dma_fast_smmu_mapping *fast;
 
@@ -696,7 +697,11 @@
 	fast->num_4k_pages = size >> FAST_PAGE_SHIFT;
 	fast->bitmap_size = BITS_TO_LONGS(fast->num_4k_pages) * sizeof(long);
 
-	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL);
+	fast->bitmap = kzalloc(fast->bitmap_size, GFP_KERNEL | __GFP_NOWARN |
+								__GFP_NORETRY);
+	if (!fast->bitmap)
+		fast->bitmap = vzalloc(fast->bitmap_size);
+
 	if (!fast->bitmap)
 		goto err2;
 
@@ -726,7 +731,7 @@
 	int atomic_domain = 1;
 	struct iommu_domain *domain = mapping->domain;
 	struct iommu_pgtbl_info info;
-	size_t size = mapping->bits << PAGE_SHIFT;
+	u64 size = (u64)mapping->bits << PAGE_SHIFT;
 
 	if (mapping->base + size > (SZ_1G * 4ULL))
 		return -EINVAL;
@@ -780,7 +785,7 @@
 	dev->archdata.mapping = NULL;
 	set_dma_ops(dev, NULL);
 
-	kfree(mapping->fast->bitmap);
+	kvfree(mapping->fast->bitmap);
 	kfree(mapping->fast);
 }
 EXPORT_SYMBOL(fast_smmu_detach_device);
diff --git a/drivers/iommu/io-pgtable-fast.c b/drivers/iommu/io-pgtable-fast.c
index 85fe317..9b13fce 100644
--- a/drivers/iommu/io-pgtable-fast.c
+++ b/drivers/iommu/io-pgtable-fast.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
 #include <linux/types.h>
 #include <linux/io-pgtable-fast.h>
 #include <asm/cacheflush.h>
+#include <linux/vmalloc.h>
 
 #include "io-pgtable.h"
 
@@ -268,11 +269,18 @@
 	return size;
 }
 
+#if defined(CONFIG_ARM64)
+#define FAST_PGDNDX(va) (((va) & 0x7fc0000000) >> 27)
+#elif defined(CONFIG_ARM)
+#define FAST_PGDNDX(va) (((va) & 0xc0000000) >> 27)
+#endif
+
 static phys_addr_t av8l_fast_iova_to_phys(struct io_pgtable_ops *ops,
 					  unsigned long iova)
 {
 	struct av8l_fast_io_pgtable *data = iof_pgtable_ops_to_data(ops);
 	av8l_fast_iopte pte, *pgdp, *pudp, *pmdp;
+	unsigned long pgd;
 	phys_addr_t phys;
 	const unsigned long pts = AV8L_FAST_PTE_TYPE_SHIFT;
 	const unsigned long ptm = AV8L_FAST_PTE_TYPE_MASK;
@@ -282,8 +290,9 @@
 
 	/* TODO: clean up some of these magic numbers... */
 
-	pgdp = (av8l_fast_iopte *)
-		(((unsigned long)data->pgd) | ((iova & 0x7fc0000000) >> 27));
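+	/*
+	 * FAST_PGDNDX() yields the byte offset of the 8-byte PGD entry
+	 * for this IOVA, i.e. the PGD index bits shifted down by 27.
+	 */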
+	pgd = (unsigned long)data->pgd | FAST_PGDNDX(iova);
+	pgdp = (av8l_fast_iopte *)pgd;
+
 	pte = *pgdp;
 	if (((pte >> pts) & ptm) != ptt)
 		return 0;
@@ -345,7 +354,12 @@
 	int i, j, pg = 0;
 	struct page **pages, *page;
 
-	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL);
+	pages = kmalloc(sizeof(*pages) * NUM_PGTBL_PAGES, GFP_KERNEL |
+					__GFP_NOWARN | __GFP_NORETRY);
+
+	if (!pages)
+		pages = vmalloc(sizeof(*pages) * NUM_PGTBL_PAGES);
+
 	if (!pages)
 		return -ENOMEM;
 
@@ -414,7 +428,7 @@
 	for (i = 0; i < pg; ++i)
 		__free_page(pages[i]);
 err_free_pages_arr:
-	kfree(pages);
+	kvfree(pages);
 	return -ENOMEM;
 }
 
@@ -473,6 +487,9 @@
 
 	reg |= (64ULL - cfg->ias) << AV8L_FAST_TCR_T0SZ_SHIFT;
 	reg |= AV8L_FAST_TCR_EPD1_FAULT << AV8L_FAST_TCR_EPD1_SHIFT;
+#if defined(CONFIG_ARM)
+	reg |= ARM_32_LPAE_TCR_EAE;
+#endif
 	cfg->av8l_fast_cfg.tcr = reg;
 
 	/* MAIRs */
@@ -512,7 +529,7 @@
 	vunmap(data->pmds);
 	for (i = 0; i < NUM_PGTBL_PAGES; ++i)
 		__free_page(data->pages[i]);
-	kfree(data->pages);
+	kvfree(data->pages);
 	kfree(data);
 }
 
@@ -560,7 +577,7 @@
 						 const phys_addr_t phys_start,
 						 const size_t size)
 {
-	unsigned long iova = iova_start;
+	u64 iova = iova_start;
 	phys_addr_t phys = phys_start;
 
 	while (iova < (iova_start + size)) {
@@ -576,11 +593,12 @@
 static int __init av8l_fast_positive_testing(void)
 {
 	int failed = 0;
-	unsigned long iova;
+	u64 iova;
 	struct io_pgtable_ops *ops;
 	struct io_pgtable_cfg cfg;
 	struct av8l_fast_io_pgtable *data;
 	av8l_fast_iopte *pmds;
+	u64 max = SZ_1G * 4ULL - 1;
 
 	cfg = (struct io_pgtable_cfg) {
 		.quirks = 0,
@@ -600,19 +618,18 @@
 	pmds = data->pmds;
 
 	/* map the entire 4GB VA space with 4K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_4K, IOMMU_READ))) {
 			failed++;
 			continue;
 		}
 	}
-
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_4K) {
+	for (iova = 0; iova < max; iova += SZ_4K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_4K) != SZ_4K))
 			failed++;
 	}
@@ -621,7 +638,7 @@
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 8K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+	for (iova = 0; iova < max; iova += SZ_8K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_8K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -629,11 +646,11 @@
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all with 8K unmap calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_8K) {
+	for (iova = 0; iova < max; iova += SZ_8K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_8K) != SZ_8K))
 			failed++;
 	}
@@ -642,7 +659,7 @@
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 16K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_16K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -650,11 +667,11 @@
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_16K) {
+	for (iova = 0; iova < max; iova += SZ_16K) {
 		if (WARN_ON(ops->unmap(ops, iova, SZ_16K) != SZ_16K))
 			failed++;
 	}
@@ -663,7 +680,7 @@
 	av8l_fast_clear_stale_ptes(pmds, false);
 
 	/* map the entire 4GB VA space with 64K map calls */
-	for (iova = 0; iova < SZ_1G * 4UL; iova += SZ_64K) {
+	for (iova = 0; iova < max; iova += SZ_64K) {
 		if (WARN_ON(ops->map(ops, iova, iova, SZ_64K, IOMMU_READ))) {
 			failed++;
 			continue;
@@ -671,11 +688,11 @@
 	}
 
 	if (WARN_ON(!av8l_fast_range_has_specific_mapping(ops, 0, 0,
-							  SZ_1G * 4UL)))
+							  max)))
 		failed++;
 
 	/* unmap it all at once */
-	if (WARN_ON(ops->unmap(ops, 0, SZ_1G * 4UL) != SZ_1G * 4UL))
+	if (WARN_ON(ops->unmap(ops, 0, max) != max))
 		failed++;
 
 	free_io_pgtable_ops(ops);
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index 45ffb40..5730126 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -822,7 +822,7 @@
 	if (!virt)
 		goto out;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4ULL);
 	if (!mapping) {
 		seq_puts(s, "fast_smmu_create_mapping failed\n");
 		goto out_kfree;
@@ -922,8 +922,8 @@
 static int __tlb_stress_sweep(struct device *dev, struct seq_file *s)
 {
 	int i, ret = 0;
-	unsigned long iova;
-	const unsigned long max = SZ_1G * 4UL;
+	u64 iova;
+	const u64 max = SZ_1G * 4ULL - 1;
 	void *virt;
 	phys_addr_t phys;
 	dma_addr_t dma_addr;
@@ -995,8 +995,8 @@
 	}
 
 	/* we're all full again. unmap everything. */
-	for (dma_addr = 0; dma_addr < max; dma_addr += SZ_8K)
-		dma_unmap_single(dev, dma_addr, SZ_8K, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += SZ_8K)
+		dma_unmap_single(dev, (dma_addr_t)iova, SZ_8K, DMA_TO_DEVICE);
 
 out:
 	free_pages((unsigned long)virt, get_order(SZ_8K));
@@ -1029,7 +1029,7 @@
 			   const size_t size)
 {
 	u64 iova;
-	const unsigned long max = SZ_1G * 4UL;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int i, remapped, unmapped, ret = 0;
 	void *virt;
 	dma_addr_t dma_addr, dma_addr2;
@@ -1061,9 +1061,9 @@
 	fib_init(&fib);
 	for (iova = get_next_fib(&fib) * size;
 	     iova < max - size;
-	     iova = get_next_fib(&fib) * size) {
-		dma_addr = iova;
-		dma_addr2 = max - size - iova;
+	     iova = (u64)get_next_fib(&fib) * size) {
+		dma_addr = (dma_addr_t)(iova);
+		dma_addr2 = (dma_addr_t)((max + 1) - size - iova);
 		if (dma_addr == dma_addr2) {
 			WARN(1,
 			"%s test needs update! The random number sequence is folding in on itself and should be changed.\n",
@@ -1089,8 +1089,8 @@
 		ret = -EINVAL;
 	}
 
-	for (dma_addr = 0; dma_addr < max; dma_addr += size)
-		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
 
 out:
 	free_pages((unsigned long)virt, get_order(size));
@@ -1118,10 +1118,11 @@
 static int __full_va_sweep(struct device *dev, struct seq_file *s,
 			   const size_t size, struct iommu_domain *domain)
 {
-	unsigned long iova;
+	u64 iova;
 	dma_addr_t dma_addr;
 	void *virt;
 	phys_addr_t phys;
+	const u64 max = SZ_1G * 4ULL - 1;
 	int ret = 0, i;
 
 	virt = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
@@ -1136,7 +1137,7 @@
 	}
 	phys = virt_to_phys(virt);
 
-	for (iova = 0, i = 0; iova < SZ_1G * 4UL; iova += size, ++i) {
+	for (iova = 0, i = 0; iova < max; iova += size, ++i) {
 		unsigned long expected = iova;
 
 		dma_addr = dma_map_single(dev, virt, size, DMA_TO_DEVICE);
@@ -1184,8 +1185,8 @@
 	}
 
 out:
-	for (dma_addr = 0; dma_addr < SZ_1G * 4UL; dma_addr += size)
-		dma_unmap_single(dev, dma_addr, size, DMA_TO_DEVICE);
+	for (iova = 0; iova < max; iova += size)
+		dma_unmap_single(dev, (dma_addr_t)iova, size, DMA_TO_DEVICE);
 
 	free_pages((unsigned long)virt, get_order(size));
 	return ret;
@@ -1374,7 +1375,8 @@
 	int ret = -EINVAL, fast = 1;
 	phys_addr_t pt_phys;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL));
 	if (!mapping)
 		goto out;
 
@@ -1443,7 +1445,9 @@
 	size_t sizes[] = {SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12, 0};
 	int ret = -EINVAL;
 
-	mapping = arm_iommu_create_mapping(&platform_bus_type, 0, SZ_1G * 4UL);
+	/* make the size 4 GB - 1, i.e. ULONG_MAX on 32-bit targets */
+	mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
+						(SZ_1G * 4ULL - 1));
 	if (!mapping)
 		goto out;
 
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index b045e3b..fdc4b30 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -175,9 +175,7 @@
 struct flash_node_data {
 	struct platform_device		*pdev;
 	struct led_classdev		cdev;
-	struct pinctrl			*pinctrl;
-	struct pinctrl_state		*gpio_state_active;
-	struct pinctrl_state		*gpio_state_suspend;
+	struct pinctrl			*strobe_pinctrl;
 	struct pinctrl_state		*hw_strobe_state_active;
 	struct pinctrl_state		*hw_strobe_state_suspend;
 	int				hw_strobe_gpio;
@@ -198,6 +196,9 @@
 struct flash_switch_data {
 	struct platform_device		*pdev;
 	struct regulator		*vreg;
+	struct pinctrl			*led_en_pinctrl;
+	struct pinctrl_state		*gpio_state_active;
+	struct pinctrl_state		*gpio_state_suspend;
 	struct led_classdev		cdev;
 	int				led_mask;
 	bool				regulator_on;
@@ -509,7 +510,7 @@
 
 	if (led->pdata->led1n2_iclamp_low_ma) {
 		val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_low_ma,
-						led->fnode[0].ires_ua);
+						led->fnode[LED1].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED1N2_ICLAMP_LOW(led->base),
 				FLASH_LED_CURRENT_MASK, val);
@@ -519,7 +520,7 @@
 
 	if (led->pdata->led1n2_iclamp_mid_ma) {
 		val = CURRENT_MA_TO_REG_VAL(led->pdata->led1n2_iclamp_mid_ma,
-						led->fnode[0].ires_ua);
+						led->fnode[LED1].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED1N2_ICLAMP_MID(led->base),
 				FLASH_LED_CURRENT_MASK, val);
@@ -529,7 +530,7 @@
 
 	if (led->pdata->led3_iclamp_low_ma) {
 		val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_low_ma,
-						led->fnode[3].ires_ua);
+						led->fnode[LED3].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED3_ICLAMP_LOW(led->base),
 				FLASH_LED_CURRENT_MASK, val);
@@ -539,7 +540,7 @@
 
 	if (led->pdata->led3_iclamp_mid_ma) {
 		val = CURRENT_MA_TO_REG_VAL(led->pdata->led3_iclamp_mid_ma,
-						led->fnode[3].ires_ua);
+						led->fnode[LED3].ires_ua);
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_LED3_ICLAMP_MID(led->base),
 				FLASH_LED_CURRENT_MASK, val);
@@ -570,9 +571,9 @@
 
 	if (gpio_is_valid(fnode->hw_strobe_gpio)) {
 		gpio_set_value(fnode->hw_strobe_gpio, on ? 1 : 0);
-	} else if (fnode->hw_strobe_state_active &&
+	} else if (fnode->strobe_pinctrl && fnode->hw_strobe_state_active &&
 					fnode->hw_strobe_state_suspend) {
-		rc = pinctrl_select_state(fnode->pinctrl,
+		rc = pinctrl_select_state(fnode->strobe_pinctrl,
 			on ? fnode->hw_strobe_state_active :
 			fnode->hw_strobe_state_suspend);
 		if (rc < 0) {
@@ -949,15 +950,6 @@
 
 		led->fnode[i].led_on = false;
 
-		if (led->fnode[i].pinctrl) {
-			rc = pinctrl_select_state(led->fnode[i].pinctrl,
-					led->fnode[i].gpio_state_suspend);
-			if (rc < 0) {
-				pr_err("failed to disable GPIO, rc=%d\n", rc);
-				return rc;
-			}
-		}
-
 		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
 			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
 					led->pdata->hw_strobe_option, false);
@@ -969,6 +961,17 @@
 		}
 	}
 
+	if (snode->led_en_pinctrl) {
+		pr_debug("Selecting suspend state for %s\n", snode->cdev.name);
+		rc = pinctrl_select_state(snode->led_en_pinctrl,
+				snode->gpio_state_suspend);
+		if (rc < 0) {
+			pr_err("failed to select pinctrl suspend state rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	snode->enabled = false;
 	return 0;
 }
@@ -1039,15 +1042,6 @@
 
 		val |= FLASH_LED_ENABLE << led->fnode[i].id;
 
-		if (led->fnode[i].pinctrl) {
-			rc = pinctrl_select_state(led->fnode[i].pinctrl,
-					led->fnode[i].gpio_state_active);
-			if (rc < 0) {
-				pr_err("failed to enable GPIO rc=%d\n", rc);
-				return rc;
-			}
-		}
-
 		if (led->fnode[i].trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
 			rc = qpnp_flash_led_hw_strobe_enable(&led->fnode[i],
 					led->pdata->hw_strobe_option, true);
@@ -1059,6 +1053,17 @@
 		}
 	}
 
+	if (snode->led_en_pinctrl) {
+		pr_debug("Selecting active state for %s\n", snode->cdev.name);
+		rc = pinctrl_select_state(snode->led_en_pinctrl,
+				snode->gpio_state_active);
+		if (rc < 0) {
+			pr_err("failed to select pinctrl active state rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	if (led->enable == 0) {
 		rc = qpnp_flash_led_masked_write(led,
 				FLASH_LED_REG_MOD_CTRL(led->base),
@@ -1461,6 +1466,20 @@
 	}
 	fnode->trigger = (strobe_sel << 2) | (edge_trigger << 1) | active_high;
 
+	rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
+	if (rc < 0) {
+		pr_err("Unable to register led node %d\n", fnode->id);
+		return rc;
+	}
+
+	fnode->cdev.dev->of_node = node;
+	fnode->strobe_pinctrl = devm_pinctrl_get(fnode->cdev.dev);
+	if (IS_ERR_OR_NULL(fnode->strobe_pinctrl)) {
+		pr_debug("No pinctrl defined for %s, err=%ld\n",
+			fnode->cdev.name, PTR_ERR(fnode->strobe_pinctrl));
+		fnode->strobe_pinctrl = NULL;
+	}
+
 	if (fnode->trigger & FLASH_LED_HW_SW_STROBE_SEL_BIT) {
 		if (of_find_property(node, "qcom,hw-strobe-gpio", NULL)) {
 			fnode->hw_strobe_gpio = of_get_named_gpio(node,
@@ -1470,11 +1489,11 @@
 				return fnode->hw_strobe_gpio;
 			}
 			gpio_direction_output(fnode->hw_strobe_gpio, 0);
-		} else {
+		} else if (fnode->strobe_pinctrl) {
 			fnode->hw_strobe_gpio = -1;
 			fnode->hw_strobe_state_active =
-				pinctrl_lookup_state(fnode->pinctrl,
-				"strobe_enable");
+				pinctrl_lookup_state(fnode->strobe_pinctrl,
+							"strobe_enable");
 			if (IS_ERR_OR_NULL(fnode->hw_strobe_state_active)) {
 				pr_err("No active pin for hardware strobe, rc=%ld\n",
 					PTR_ERR(fnode->hw_strobe_state_active));
@@ -1482,8 +1501,8 @@
 			}
 
 			fnode->hw_strobe_state_suspend =
-				pinctrl_lookup_state(fnode->pinctrl,
-				"strobe_disable");
+				pinctrl_lookup_state(fnode->strobe_pinctrl,
+							"strobe_disable");
 			if (IS_ERR_OR_NULL(fnode->hw_strobe_state_suspend)) {
 				pr_err("No suspend pin for hardware strobe, rc=%ld\n",
 					PTR_ERR(fnode->hw_strobe_state_suspend)
@@ -1493,38 +1512,6 @@
 		}
 	}
 
-	rc = led_classdev_register(&led->pdev->dev, &fnode->cdev);
-	if (rc < 0) {
-		pr_err("Unable to register led node %d\n", fnode->id);
-		return rc;
-	}
-
-	fnode->cdev.dev->of_node = node;
-
-	fnode->pinctrl = devm_pinctrl_get(fnode->cdev.dev);
-	if (IS_ERR_OR_NULL(fnode->pinctrl)) {
-		pr_debug("No pinctrl defined\n");
-		fnode->pinctrl = NULL;
-	} else {
-		fnode->gpio_state_active =
-			pinctrl_lookup_state(fnode->pinctrl, "led_enable");
-		if (IS_ERR_OR_NULL(fnode->gpio_state_active)) {
-			pr_err("Cannot lookup LED active state\n");
-			devm_pinctrl_put(fnode->pinctrl);
-			fnode->pinctrl = NULL;
-			return PTR_ERR(fnode->gpio_state_active);
-		}
-
-		fnode->gpio_state_suspend =
-			pinctrl_lookup_state(fnode->pinctrl, "led_disable");
-		if (IS_ERR_OR_NULL(fnode->gpio_state_suspend)) {
-			pr_err("Cannot lookup LED disable state\n");
-			devm_pinctrl_put(fnode->pinctrl);
-			fnode->pinctrl = NULL;
-			return PTR_ERR(fnode->gpio_state_suspend);
-		}
-	}
-
 	return 0;
 }
 
@@ -1589,6 +1576,36 @@
 	}
 
 	snode->cdev.dev->of_node = node;
+
+	snode->led_en_pinctrl = devm_pinctrl_get(snode->cdev.dev);
+	if (IS_ERR_OR_NULL(snode->led_en_pinctrl)) {
+		pr_debug("No pinctrl defined for %s, err=%ld\n",
+			snode->cdev.name, PTR_ERR(snode->led_en_pinctrl));
+		snode->led_en_pinctrl = NULL;
+	}
+
+	if (snode->led_en_pinctrl) {
+		snode->gpio_state_active =
+			pinctrl_lookup_state(snode->led_en_pinctrl,
+						"led_enable");
+		if (IS_ERR_OR_NULL(snode->gpio_state_active)) {
+			pr_err("Cannot lookup LED active state\n");
+			devm_pinctrl_put(snode->led_en_pinctrl);
+			snode->led_en_pinctrl = NULL;
+			return PTR_ERR(snode->gpio_state_active);
+		}
+
+		snode->gpio_state_suspend =
+			pinctrl_lookup_state(snode->led_en_pinctrl,
+						"led_disable");
+		if (IS_ERR_OR_NULL(snode->gpio_state_suspend)) {
+			pr_err("Cannot lookup LED disable state\n");
+			devm_pinctrl_put(snode->led_en_pinctrl);
+			snode->led_en_pinctrl = NULL;
+			return PTR_ERR(snode->gpio_state_suspend);
+		}
+	}
+
 	return 0;
 }
 
@@ -2095,22 +2112,24 @@
 		if (!strcmp("flash", temp_string) ||
 				!strcmp("torch", temp_string)) {
 			rc = qpnp_flash_led_parse_each_led_dt(led,
-					&led->fnode[i++], temp);
+					&led->fnode[i], temp);
 			if (rc < 0) {
 				pr_err("Unable to parse flash node %d rc=%d\n",
 					i, rc);
 				goto error_led_register;
 			}
+			i++;
 		}
 
 		if (!strcmp("switch", temp_string)) {
 			rc = qpnp_flash_led_parse_and_register_switch(led,
-					&led->snode[j++], temp);
+					&led->snode[j], temp);
 			if (rc < 0) {
 				pr_err("Unable to parse and register switch node, rc=%d\n",
 					rc);
 				goto error_switch_register;
 			}
+			j++;
 		}
 	}
 
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 628ba00..e66f404 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -986,26 +986,29 @@
 	struct dm_offload *o = container_of(cb, struct dm_offload, cb);
 	struct bio_list list;
 	struct bio *bio;
+	int i;
 
 	INIT_LIST_HEAD(&o->cb.list);
 
 	if (unlikely(!current->bio_list))
 		return;
 
-	list = *current->bio_list;
-	bio_list_init(current->bio_list);
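+	/* current->bio_list is now an array of two lists; offload both */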
+	for (i = 0; i < 2; i++) {
+		list = current->bio_list[i];
+		bio_list_init(&current->bio_list[i]);
 
-	while ((bio = bio_list_pop(&list))) {
-		struct bio_set *bs = bio->bi_pool;
-		if (unlikely(!bs) || bs == fs_bio_set) {
-			bio_list_add(current->bio_list, bio);
-			continue;
+		while ((bio = bio_list_pop(&list))) {
+			struct bio_set *bs = bio->bi_pool;
+			if (unlikely(!bs) || bs == fs_bio_set) {
+				bio_list_add(&current->bio_list[i], bio);
+				continue;
+			}
+
+			spin_lock(&bs->rescue_lock);
+			bio_list_add(&bs->rescue_list, bio);
+			queue_work(bs->rescue_workqueue, &bs->rescue_work);
+			spin_unlock(&bs->rescue_lock);
 		}
-
-		spin_lock(&bs->rescue_lock);
-		bio_list_add(&bs->rescue_list, bio);
-		queue_work(bs->rescue_workqueue, &bs->rescue_work);
-		spin_unlock(&bs->rescue_lock);
 	}
 }
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 55b5e0e..4c4aab0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -941,7 +941,8 @@
 				    !conf->barrier ||
 				    (atomic_read(&conf->nr_pending) &&
 				     current->bio_list &&
-				     !bio_list_empty(current->bio_list)),
+				     (!bio_list_empty(&current->bio_list[0]) ||
+				      !bio_list_empty(&current->bio_list[1]))),
 				    conf->resync_lock);
 		conf->nr_waiting--;
 		if (!conf->nr_waiting)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
index 980e4af..819f57b 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_core.h
@@ -459,6 +459,7 @@
 			bool input);
 	int (*ops_hw_get_downscale_caps)(struct sde_rot_mgr *mgr, char *caps,
 			int len);
+	int (*ops_hw_get_maxlinewidth)(struct sde_rot_mgr *mgr);
 
 	void *hw_data;
 };
@@ -490,6 +491,14 @@
 	return 0;
 }
 
+static inline int sde_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
+{
+	if (mgr && mgr->ops_hw_get_maxlinewidth)
+		return mgr->ops_hw_get_maxlinewidth(mgr);
+
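+	/* conservative default when no hw op is registered */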
+	return 2048;
+}
+
 static inline int __compare_session_item_rect(
 	struct sde_rotation_buf_info *s_rect,
 	struct sde_rect *i_rect, uint32_t i_fmt, bool src)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
index c061446..1c94632 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_dev.c
@@ -1315,6 +1315,35 @@
 EXPORT_SYMBOL(sde_rotator_inline_get_downscale_caps);
 
 /*
+ * sde_rotator_inline_get_maxlinewidth - get maximum line width of rotator
+ * @pdev: Pointer to platform device
+ * return: maximum line width on success, or -EINVAL on error
+ */
+int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev)
+{
+	struct sde_rotator_device *rot_dev;
+	int maxlinewidth;
+
+	if (!pdev) {
+		SDEROT_ERR("invalid platform device\n");
+		return -EINVAL;
+	}
+
+	rot_dev = (struct sde_rotator_device *)platform_get_drvdata(pdev);
+	if (!rot_dev || !rot_dev->mgr) {
+		SDEROT_ERR("invalid rotator device\n");
+		return -EINVAL;
+	}
+
+	sde_rot_mgr_lock(rot_dev->mgr);
+	maxlinewidth = sde_rotator_get_maxlinewidth(rot_dev->mgr);
+	sde_rot_mgr_unlock(rot_dev->mgr);
+
+	return maxlinewidth;
+}
+EXPORT_SYMBOL(sde_rotator_inline_get_maxlinewidth);
+
+/*
  * sde_rotator_inline_get_pixfmt_caps - get pixel format capability
  * @pdev: Pointer to platform device
  * @pixfmt: array of pixel format buffer
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
index ec89785..27fd0c3 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_inline.h
@@ -104,6 +104,7 @@
 		u32 src_pixfmt, u32 *dst_pixfmt);
 int sde_rotator_inline_get_downscale_caps(struct platform_device *pdev,
 		char *downscale_caps, int len);
+int sde_rotator_inline_get_maxlinewidth(struct platform_device *pdev);
 int sde_rotator_inline_get_pixfmt_caps(struct platform_device *pdev,
 		bool input, u32 *pixfmt, int len);
 int sde_rotator_inline_commit(void *handle, struct sde_rotator_inline_cmd *cmd,
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 9071361..a152573 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -55,6 +55,8 @@
 #define DEFAULT_UBWC_MALSIZE	1
 #define DEFAULT_UBWC_SWIZZLE	1
 
+#define DEFAULT_MAXLINEWIDTH	4096
+
 /* Macro for constructing the REGDMA command */
 #define SDE_REGDMA_WRITE(p, off, data) \
 	do { \
@@ -2457,11 +2459,24 @@
 		struct sde_rot_entry *entry)
 {
 	struct sde_rot_data_type *mdata = sde_rot_get_mdata();
+	struct sde_hw_rotator *hw_data;
 	int ret = 0;
 	u16 src_w, src_h, dst_w, dst_h;
 	struct sde_rotation_item *item = &entry->item;
 	struct sde_mdp_format_params *fmt;
 
+	if (!mgr || !entry || !mgr->hw_data) {
+		SDEROT_ERR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	hw_data = mgr->hw_data;
+
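+	/* reject requests wider than the supported line width */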
+	if (hw_data->maxlinewidth < item->src_rect.w) {
+		SDEROT_ERR("invalid src width %u\n", item->src_rect.w);
+		return -EINVAL;
+	}
+
 	src_w = item->src_rect.w;
 	src_h = item->src_rect.h;
 
@@ -2756,6 +2771,25 @@
 }
 
 /*
+ * sde_hw_rotator_get_maxlinewidth - get maximum line width supported
+ * @mgr: Pointer to rotator manager
+ * return: maximum line width supported by hardware, or -EINVAL on error
+ */
+static int sde_hw_rotator_get_maxlinewidth(struct sde_rot_mgr *mgr)
+{
+	struct sde_hw_rotator *rot;
+
+	if (!mgr || !mgr->hw_data) {
+		SDEROT_ERR("null parameters\n");
+		return -EINVAL;
+	}
+
+	rot = mgr->hw_data;
+
+	return rot->maxlinewidth;
+}
+
+/*
  * sde_hw_rotator_parse_dt - parse r3 specific device tree settings
  * @hw_data: Pointer to rotator hw
  * @dev: Pointer to platform device
@@ -2824,6 +2858,16 @@
 		hw_data->sbuf_headroom = data;
 	}
 
+	ret = of_property_read_u32(dev->dev.of_node,
+			"qcom,mdss-rot-linewidth", &data);
+	if (ret) {
+		ret = 0;
+		hw_data->maxlinewidth = DEFAULT_MAXLINEWIDTH;
+	} else {
+		SDEROT_DBG("set mdss-rot-linewidth to %d\n", data);
+		hw_data->maxlinewidth = data;
+	}
+
 	return ret;
 }
 
@@ -2871,6 +2915,7 @@
 	mgr->ops_hw_pre_pmevent = sde_hw_rotator_pre_pmevent;
 	mgr->ops_hw_post_pmevent = sde_hw_rotator_post_pmevent;
 	mgr->ops_hw_get_downscale_caps = sde_hw_rotator_get_downscale_caps;
+	mgr->ops_hw_get_maxlinewidth = sde_hw_rotator_get_maxlinewidth;
 
 	ret = sde_hw_rotator_parse_dt(mgr->hw_data, mgr->pdev);
 	if (ret)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
index d1607d9..22eaa3f 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_internal.h
@@ -262,6 +262,7 @@
  * @outpixfmts: array of supported output pixel formats in fourcc
  * @num_outpixfmt: size of the supported output pixel formats array
  * @downscale_caps: capability string of scaling
+ * @maxlinewidth: maximum line width supported
  */
 struct sde_hw_rotator {
 	/* base */
@@ -322,6 +323,7 @@
 	u32 *outpixfmts;
 	u32 num_outpixfmt;
 	const char *downscale_caps;
+	u32 maxlinewidth;
 };
 
 /**
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 87a4ac8..709f1d8 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1297,18 +1297,6 @@
 			sizeof(struct hfi_quantization_range);
 		break;
 	}
-	case HAL_PARAM_VENC_MAX_NUM_B_FRAMES:
-	{
-		struct hfi_max_num_b_frames *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
-		hfi = (struct hfi_max_num_b_frames *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_max_num_b_frames *) pdata,
-				sizeof(struct hfi_max_num_b_frames));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_max_num_b_frames);
-		break;
-	}
 	case HAL_CONFIG_VENC_INTRA_PERIOD:
 	{
 		struct hfi_intra_period *hfi;
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.h b/drivers/media/platform/msm/vidc/hfi_packetization.h
index e0def0f..06c0574 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.h
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.h
@@ -18,9 +18,9 @@
 #include "vidc_hfi.h"
 #include "vidc_hfi_api.h"
 
-#define call_hfi_pkt_op(q, op, args...)			\
+#define call_hfi_pkt_op(q, op, ...)			\
 	(((q) && (q)->pkt_ops && (q)->pkt_ops->op) ?	\
-	((q)->pkt_ops->op(args)) : 0)
+	((q)->pkt_ops->op(__VA_ARGS__)) : 0)
 
 enum hfi_packetization_type {
 	HFI_PACKETIZATION_4XX,
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 00830cc..3378ff0 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -241,10 +241,8 @@
 		} while (num_properties_changed > 0);
 	}
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_EVENT_CHANGE,
-		.response.event = event_notify,
-	};
+	info->response_type = HAL_SESSION_EVENT_CHANGE;
+	info->response.event = event_notify;
 
 	return 0;
 }
@@ -275,10 +273,8 @@
 	event_notify.packet_buffer = data->packet_buffer;
 	event_notify.extra_data_buffer = data->extra_data_buffer;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_EVENT_CHANGE,
-		.response.event = event_notify,
-	};
+	info->response_type = HAL_SESSION_EVENT_CHANGE;
+	info->response.event = event_notify;
 
 	return 0;
 }
@@ -289,10 +285,8 @@
 
 	cmd_done.device_id = device_id;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SYS_ERROR,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SYS_ERROR;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -315,17 +309,13 @@
 	case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
 		cmd_done.status = VIDC_ERR_NONE;
 		dprintk(VIDC_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
-		*info = (struct msm_vidc_cb_info) {
-			.response_type =  HAL_RESPONSE_UNUSED,
-			.response.cmd = cmd_done,
-		};
+		info->response_type = HAL_RESPONSE_UNUSED;
+		info->response.cmd = cmd_done;
 		return 0;
 	default:
 		dprintk(VIDC_ERR, "HFI_EVENT_SESSION_ERROR\n");
-		*info = (struct msm_vidc_cb_info) {
-			.response_type =  HAL_SESSION_ERROR,
-			.response.cmd = cmd_done,
-		};
+		info->response_type = HAL_SESSION_ERROR;
+		info->response.cmd = cmd_done;
 		return 0;
 	}
 }
@@ -403,10 +393,10 @@
 	cmd_done.session_id = NULL;
 	cmd_done.status = (u32)status;
 	cmd_done.size = sizeof(struct vidc_hal_sys_init_done);
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SYS_INIT_DONE,
-		.response.cmd = cmd_done,
-	};
+
+	info->response_type = HAL_SYS_INIT_DONE;
+	info->response.cmd = cmd_done;
+
 	return 0;
 }
 
@@ -433,10 +423,8 @@
 	cmd_done.status = (u32) status;
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SYS_RELEASE_RESOURCE_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SYS_RELEASE_RESOURCE_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1162,10 +1150,8 @@
 		cmd_done.data.property.buf_req = buff_req;
 		cmd_done.size = sizeof(buff_req);
 
-		*info = (struct msm_vidc_cb_info) {
-			.response_type =  HAL_SESSION_PROPERTY_INFO,
-			.response.cmd = cmd_done,
-		};
+		info->response_type = HAL_SESSION_PROPERTY_INFO;
+		info->response.cmd = cmd_done;
 
 		return 0;
 	default:
@@ -1197,10 +1183,8 @@
 	cmd_done.data.session_init_done = session_init_done;
 	cmd_done.size = sizeof(struct vidc_hal_session_init_done);
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_INIT_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_INIT_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1227,10 +1211,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_LOAD_RESOURCE_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_LOAD_RESOURCE_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1272,10 +1254,8 @@
 		return -EINVAL;
 	}
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_FLUSH_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_FLUSH_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1323,10 +1303,8 @@
 		(u32)pkt->packet_buffer, -1, -1,
 		pkt->filled_len, pkt->offset);
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_ETB_DONE,
-		.response.data = data_done,
-	};
+	info->response_type = HAL_SESSION_ETB_DONE;
+	info->response.data = data_done;
 
 	return 0;
 }
@@ -1450,10 +1428,8 @@
 		data_done.output_done.filled_len1,
 		data_done.output_done.offset1);
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_FTB_DONE,
-		.response.data = data_done,
-	};
+	info->response_type = HAL_SESSION_FTB_DONE;
+	info->response.data = data_done;
 
 	return 0;
 }
@@ -1479,10 +1455,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_START_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_START_DONE;
+	info->response.cmd = cmd_done;
 	return 0;
 }
 
@@ -1507,10 +1481,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_STOP_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_STOP_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1536,10 +1508,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_RELEASE_RESOURCE_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_RELEASE_RESOURCE_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1571,10 +1541,8 @@
 		dprintk(VIDC_ERR, "invalid payload in rel_buff_done\n");
 	}
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_RELEASE_BUFFER_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_RELEASE_BUFFER_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1598,10 +1566,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_END_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_END_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1626,10 +1592,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_ABORT_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_ABORT_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vdec.c b/drivers/media/platform/msm/vidc/msm_vdec.c
index 7c99e90..b211175 100644
--- a/drivers/media/platform/msm/vidc/msm_vdec.c
+++ b/drivers/media/platform/msm/vidc/msm_vdec.c
@@ -522,7 +522,7 @@
 			inst->prop.width[CAPTURE_PORT] == f->fmt.pix_mp.width &&
 			inst->prop.height[CAPTURE_PORT] ==
 				f->fmt.pix_mp.height) {
-			dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+			dprintk(VIDC_DBG, "No change in CAPTURE port params\n");
 			return 0;
 		}
 		memcpy(&inst->fmts[fmt->type], fmt,
@@ -590,7 +590,7 @@
 			inst->prop.width[OUTPUT_PORT] == f->fmt.pix_mp.width &&
 			inst->prop.height[OUTPUT_PORT] ==
 				f->fmt.pix_mp.height) {
-			dprintk(VIDC_DBG, "Thank you : Nothing changed\n");
+			dprintk(VIDC_DBG, "No change in OUTPUT port params\n");
 			return 0;
 		}
 		inst->prop.width[OUTPUT_PORT] = f->fmt.pix_mp.width;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 13cc1b2..7bfd255 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1093,7 +1093,7 @@
 	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8)
 		return 0;
 
-	num_enh_layers = layers ? : 0;
+	num_enh_layers = layers ? layers : 0;
 	dprintk(VIDC_DBG, "%s Hier-P in firmware\n",
 			num_enh_layers ? "Enable" : "Disable");
 
@@ -1244,7 +1244,6 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES:
 	{
 		int num_p, num_b;
-		u32 max_num_b_frames;
 
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES);
 		num_b = temp_ctrl->val;
@@ -1257,34 +1256,10 @@
 		else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES)
 			num_b = ctrl->val;
 
-		max_num_b_frames = num_b ? MAX_NUM_B_FRAMES : 0;
-		property_id = HAL_PARAM_VENC_MAX_NUM_B_FRAMES;
-		pdata = &max_num_b_frames;
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, property_id, pdata);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed : Setprop MAX_NUM_B_FRAMES %d\n",
-				rc);
-			break;
-		}
-
 		property_id = HAL_CONFIG_VENC_INTRA_PERIOD;
 		intra_period.pframes = num_p;
 		intra_period.bframes = num_b;
 
-		/*
-		 *Incase firmware does not have B-Frame support,
-		 *offload the b-frame count to p-frame to make up
-		 *for the requested Intraperiod
-		 */
-		if (!inst->capability.bframe.max) {
-			intra_period.pframes = num_p + num_b;
-			intra_period.bframes = 0;
-			dprintk(VIDC_DBG,
-				"No bframe support, changing pframe from %d to %d\n",
-				num_p, intra_period.pframes);
-		}
 		pdata = &intra_period;
 		break;
 	}
@@ -1297,7 +1272,10 @@
 	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
 	{
 		int final_mode = 0;
-		struct v4l2_ctrl update_ctrl = {.id = 0};
+		struct v4l2_ctrl update_ctrl;
+
+		update_ctrl.id = 0;
+		update_ctrl.val = 0;
 
 		/* V4L2_CID_MPEG_VIDEO_BITRATE_MODE and _RATE_CONTROL
 		 * manipulate the same thing.  If one control's state
@@ -1353,7 +1331,7 @@
 	{
 		property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
 		inst->bitrate = ctrl->val;
 		break;
@@ -1976,7 +1954,7 @@
 int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
 	struct v4l2_ext_controls *ctrl)
 {
-	int rc = 0, i, j = 0;
+	int rc = 0, i;
 	struct v4l2_ext_control *control;
 	struct hfi_device *hdev;
 	struct hal_ltr_mode ltr_mode;
@@ -2044,32 +2022,6 @@
 			property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
 			pdata = &sar;
 			break;
-		case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
-		{
-			if (control[i].value) {
-				bitrate.layer_id = i;
-				bitrate.bit_rate = control[i].value;
-				property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
-				pdata = &bitrate;
-				dprintk(VIDC_DBG, "bitrate for layer(%d)=%d\n",
-					i, bitrate.bit_rate);
-				rc = call_hfi_op(hdev, session_set_property,
-					(void *)inst->session, property_id,
-					 pdata);
-				if (rc) {
-					dprintk(VIDC_DBG, "prop %x failed\n",
-						property_id);
-					return rc;
-				}
-				if (i == MAX_HYBRID_HIER_P_LAYERS - 1) {
-					dprintk(VIDC_DBG, "HAL property=%x\n",
-						property_id);
-					property_id = 0;
-					rc = 0;
-				}
-			}
-			break;
-		}
 		case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH:
 			property_id = HAL_CONFIG_VENC_BLUR_RESOLUTION;
 			blur_res.width = control[i].value;
@@ -2084,92 +2036,83 @@
 			pdata = &blur_res;
 			break;
 		case V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID:
-			j = i;
-			layer_id = control[j].value;
-			do {
-				switch (control[j].id) {
-				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
-					qp.qpi = control[j].value;
-					qp.layer_id = layer_id;
-					property_id =
-						HAL_CONFIG_VENC_FRAME_QP;
-					pdata = &qp;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
-					qp.qpp = control[j].value;
-					qp.layer_id = layer_id;
-					property_id =
-						HAL_CONFIG_VENC_FRAME_QP;
-					pdata = &qp;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
-					qp.qpb = control[j].value;
-					qp.layer_id = layer_id;
-					property_id =
-						HAL_CONFIG_VENC_FRAME_QP;
-					pdata = &qp;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
-					qp_range.qpi_min = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
-					qp_range.qpp_min = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
-					qp_range.qpb_min = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
-					qp_range.qpi_max = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
-					qp_range.qpp_max = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
-					qp_range.qpb_max = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				}
-				j++;
-			} while ((j < ctrl->count) &&
-				control[j].id !=
-					V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID);
-			if (!rc && property_id) {
-				dprintk(VIDC_DBG, "Control: HAL property=%x\n",
-					 property_id);
-				rc = call_hfi_op(hdev, session_set_property,
-						(void *)inst->session,
-						property_id, pdata);
-				if (rc) {
-					dprintk(VIDC_ERR, "prop %x failed\n",
-						property_id);
-					return rc;
-				}
-				property_id = 0;
+			layer_id = control[i].value;
+			i++;
+			while (i < ctrl->count) {
+			switch (control[i].id) {
+			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
+				qp.qpi = control[i].value;
+				qp.layer_id = layer_id;
+				property_id =
+					HAL_CONFIG_VENC_FRAME_QP;
+				pdata = &qp;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
+				qp.qpp = control[i].value;
+				qp.layer_id = layer_id;
+				property_id =
+					HAL_CONFIG_VENC_FRAME_QP;
+				pdata = &qp;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
+				qp.qpb = control[i].value;
+				qp.layer_id = layer_id;
+				property_id =
+					HAL_CONFIG_VENC_FRAME_QP;
+				pdata = &qp;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
+				qp_range.qpi_min = control[i].value;
+				qp_range.layer_id = layer_id;
+				property_id =
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
+				pdata = &qp_range;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
+				qp_range.qpp_min = control[i].value;
+				qp_range.layer_id = layer_id;
+				property_id =
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
+				pdata = &qp_range;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
+				qp_range.qpb_min = control[i].value;
+				qp_range.layer_id = layer_id;
+				property_id =
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
+				pdata = &qp_range;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
+				qp_range.qpi_max = control[i].value;
+				qp_range.layer_id = layer_id;
+				property_id =
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
+				pdata = &qp_range;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
+				qp_range.qpp_max = control[i].value;
+				qp_range.layer_id = layer_id;
+				property_id =
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
+				pdata = &qp_range;
+				break;
+			case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
+				qp_range.qpb_max = control[i].value;
+				qp_range.layer_id = layer_id;
+				property_id =
+					HAL_PARAM_VENC_SESSION_QP_RANGE;
+				pdata = &qp_range;
+				break;
+			case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
+				bitrate.bit_rate = control[i].value;
+				bitrate.layer_id = layer_id;
+				property_id =
+					HAL_CONFIG_VENC_TARGET_BITRATE;
+				pdata = &bitrate;
+				break;
 			}
-			i = j - 1;
+			i++;
+			}
 			break;
 		default:
 			dprintk(VIDC_ERR, "Invalid id set: %d\n",
@@ -2286,6 +2229,7 @@
 	struct hfi_device *hdev;
 	int extra_idx = 0, i = 0;
 	struct hal_buffer_requirements *buff_req_buffer;
+	struct hal_frame_size frame_sz;
 
 	if (!inst || !f) {
 		dprintk(VIDC_ERR,
@@ -2324,6 +2268,19 @@
 		inst->prop.width[CAPTURE_PORT] = f->fmt.pix_mp.width;
 		inst->prop.height[CAPTURE_PORT] = f->fmt.pix_mp.height;
 
+		frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+		frame_sz.width = inst->prop.width[CAPTURE_PORT];
+		frame_sz.height = inst->prop.height[CAPTURE_PORT];
+		dprintk(VIDC_DBG, "CAPTURE port width = %d, height = %d\n",
+			frame_sz.width, frame_sz.height);
+		rc = call_hfi_op(hdev, session_set_property, (void *)
+			inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz);
+		if (rc) {
+			dprintk(VIDC_ERR,
+				"Failed to set framesize for CAPTURE port\n");
+			goto exit;
+		}
+
 		rc = msm_comm_try_get_bufreqs(inst);
 		if (rc) {
 			dprintk(VIDC_ERR,
@@ -2372,7 +2329,7 @@
 		frame_sz.buffer_type = HAL_BUFFER_INPUT;
 		frame_sz.width = inst->prop.width[OUTPUT_PORT];
 		frame_sz.height = inst->prop.height[OUTPUT_PORT];
-		dprintk(VIDC_DBG, "width = %d, height = %d\n",
+		dprintk(VIDC_DBG, "OUTPUT port width = %d, height = %d\n",
 				frame_sz.width, frame_sz.height);
 		rc = call_hfi_op(hdev, session_set_property, (void *)
 			inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 114a702..d3af24e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -26,6 +26,9 @@
 
 #define MAX_EVENTS 30
 
+static int try_get_ctrl(struct msm_vidc_inst *inst,
+	struct v4l2_ctrl *ctrl);
+
 static int get_poll_flags(void *instance)
 {
 	struct msm_vidc_inst *inst = instance;
@@ -150,6 +153,7 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.hier_p);
 		break;
+	case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
 	case  V4L2_CID_MPEG_VIDEO_BITRATE:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bitrate);
 		break;
@@ -253,11 +257,20 @@
 int msm_vidc_g_ctrl(void *instance, struct v4l2_control *control)
 {
 	struct msm_vidc_inst *inst = instance;
+	struct v4l2_ctrl *ctrl = NULL;
+	int rc = 0;
 
 	if (!inst || !control)
 		return -EINVAL;
 
-	return msm_comm_g_ctrl(instance, control);
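+	/*
+	 * Go through the control framework so runtime-computed values
+	 * (e.g. minimum buffer counts) are refreshed via try_get_ctrl().
+	 */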
+	ctrl = v4l2_ctrl_find(&inst->ctrl_handler, control->id);
+	if (ctrl) {
+		rc = try_get_ctrl(inst, ctrl);
+		if (!rc)
+			control->value = ctrl->val;
+	}
+
+	return rc;
 }
 EXPORT_SYMBOL(msm_vidc_g_ctrl);
 
@@ -1221,21 +1234,22 @@
 				HAL_BUFFER_INPUT);
 			return -EINVAL;
 		}
-		if (*num_buffers < bufreq->buffer_count_actual) {
+		if (*num_buffers < bufreq->buffer_count_min_host) {
 			dprintk(VIDC_ERR,
-				"Invalid parameters : Req = %d Act = %d\n",
+				"Invalid parameters : Req = %d Min = %d\n",
-				*num_buffers, bufreq->buffer_count_actual);
+				*num_buffers, bufreq->buffer_count_min_host);
 			return -EINVAL;
 		}
 		*num_planes = inst->bufq[OUTPUT_PORT].num_planes;
 		if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS ||
 			*num_buffers > MAX_NUM_OUTPUT_BUFFERS)
-			*num_buffers = MIN_NUM_OUTPUT_BUFFERS;
+			bufreq->buffer_count_actual = *num_buffers =
+				MIN_NUM_OUTPUT_BUFFERS;
 		for (i = 0; i < *num_planes; i++)
 			sizes[i] = inst->bufq[OUTPUT_PORT].plane_sizes[i];
 
 		bufreq->buffer_count_actual = *num_buffers;
-		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+		rc = set_buffer_count(inst, bufreq->buffer_count_actual,
 			*num_buffers, HAL_BUFFER_INPUT);
 		}
 
@@ -1250,22 +1264,27 @@
 				buffer_type);
 			return -EINVAL;
 		}
-		if (*num_buffers < bufreq->buffer_count_actual) {
-			dprintk(VIDC_ERR,
-				"Invalid parameters : Req = %d Act = %d\n",
-				*num_buffers, bufreq->buffer_count_actual);
-			return -EINVAL;
+		if (inst->session_type != MSM_VIDC_DECODER &&
+			inst->state > MSM_VIDC_LOAD_RESOURCES_DONE) {
+			if (*num_buffers < bufreq->buffer_count_min_host) {
+				dprintk(VIDC_ERR,
+					"Invalid parameters : Req = %d Min = %d\n",
+						*num_buffers,
+						bufreq->buffer_count_min_host);
+				return -EINVAL;
+			}
 		}
 		*num_planes = inst->bufq[CAPTURE_PORT].num_planes;
 		if (*num_buffers < MIN_NUM_CAPTURE_BUFFERS ||
 			*num_buffers > MAX_NUM_CAPTURE_BUFFERS)
-			*num_buffers = MIN_NUM_CAPTURE_BUFFERS;
+			bufreq->buffer_count_actual = *num_buffers =
+				MIN_NUM_CAPTURE_BUFFERS;
 
 		for (i = 0; i < *num_planes; i++)
 			sizes[i] = inst->bufq[CAPTURE_PORT].plane_sizes[i];
 
 		bufreq->buffer_count_actual = *num_buffers;
-		rc = set_buffer_count(inst, bufreq->buffer_count_min_host,
+		rc = set_buffer_count(inst, bufreq->buffer_count_actual,
 			*num_buffers, buffer_type);
 		}
 		break;
@@ -1289,24 +1308,40 @@
 {
 	int rc = 0, i = 0;
 
+	/* For decoders, no sanity checks needed until LOAD_RESOURCES_DONE */
+	if (inst->session_type == MSM_VIDC_DECODER &&
+			inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
+		dprintk(VIDC_DBG,
+			"No need to verify buffer counts : %pK\n", inst);
+		return 0;
+	}
+
 	for (i = 0; i < HAL_BUFFER_MAX; i++) {
 		struct hal_buffer_requirements *req = &inst->buff_req.buffer[i];
 
-		dprintk(VIDC_DBG, "Verifying Buffer : %d\n", req->buffer_type);
-		if (!req ||
-			req->buffer_count_actual < req->buffer_count_min_host ||
-			req->buffer_count_min_host < req->buffer_count_min) {
-			dprintk(VIDC_ERR, "Invalid data : Counts mismatch\n");
-			dprintk(VIDC_ERR,
-				"Min Count = %d ", req->buffer_count_min);
-			dprintk(VIDC_ERR,
-				"Min Host Count = %d ",
-					req->buffer_count_min_host);
-			dprintk(VIDC_ERR,
-				"Min Actual Count = %d\n",
-					req->buffer_count_actual);
-			rc = -EINVAL;
-			break;
+		if (req && (msm_comm_get_hal_output_buffer(inst) ==
+				req->buffer_type)) {
+			dprintk(VIDC_DBG, "Verifying Buffer : %d\n",
+				req->buffer_type);
+			if (req->buffer_count_actual <
+					req->buffer_count_min_host ||
+				req->buffer_count_min_host <
+					req->buffer_count_min) {
+
+				dprintk(VIDC_ERR,
+					"Invalid data : Counts mismatch\n");
+				dprintk(VIDC_ERR,
+					"Min Count = %d ",
+						req->buffer_count_min);
+				dprintk(VIDC_ERR,
+					"Min Host Count = %d ",
+						req->buffer_count_min_host);
+				dprintk(VIDC_ERR,
+					"Min Actual Count = %d\n",
+						req->buffer_count_actual);
+				rc = -EINVAL;
+				break;
+			}
 		}
 	}
 	return rc;
@@ -1728,7 +1763,7 @@
 }
 
 
-static int msm_vdec_get_count(struct msm_vidc_inst *inst,
+static int msm_vidc_get_count(struct msm_vidc_inst *inst,
 	struct v4l2_ctrl *ctrl)
 {
 	int rc = 0;
@@ -1749,15 +1784,19 @@
 		}
 		if (ctrl->val > bufreq->buffer_count_min_host) {
 			dprintk(VIDC_DBG,
-				"Interesting : Usually shouldn't happen\n");
+				"Host buffer count changed from %d to %d\n",
+					bufreq->buffer_count_min_host,
+					ctrl->val);
 			bufreq->buffer_count_min_host = ctrl->val;
+		} else {
+			ctrl->val = bufreq->buffer_count_min_host;
 		}
-		rc = set_actual_buffer_count(inst, ctrl->val,
+		rc = set_actual_buffer_count(inst,
+				bufreq->buffer_count_min_host,
 			HAL_BUFFER_INPUT);
 		return rc;
 
 	} else if (ctrl->id == V4L2_CID_MIN_BUFFERS_FOR_CAPTURE) {
-		int count = 0;
 
 		buffer_type = msm_comm_get_hal_output_buffer(inst);
 		bufreq = get_buff_req_buffer(inst,
@@ -1774,7 +1813,7 @@
 			else
 				return 0;
 		}
-		count = bufreq->buffer_count_min_host;
 
 		if (inst->in_reconfig) {
 			rc = msm_comm_try_get_bufreqs(inst);
@@ -1786,21 +1825,28 @@
 					buffer_type);
 				return 0;
 			}
-			newreq->buffer_count_min_host = count =
-				newreq->buffer_count_min +
-				msm_dcvs_get_extra_buff_count(inst);
+			ctrl->val = newreq->buffer_count_min;
 		}
-		if (!inst->in_reconfig &&
+		if (inst->session_type == MSM_VIDC_DECODER &&
+				!inst->in_reconfig &&
 			inst->state < MSM_VIDC_LOAD_RESOURCES_DONE) {
-			dprintk(VIDC_DBG, "Clients will correct this\n");
-			rc = set_actual_buffer_count(inst, ctrl->val,
-				buffer_type);
+			dprintk(VIDC_DBG,
+				"Client updates buffer count from %d to %d\n",
+				bufreq->buffer_count_min_host, ctrl->val);
 			bufreq->buffer_count_min_host = ctrl->val;
-			return 0;
 		}
-		bufreq->buffer_count_min_host = ctrl->val = count;
-		rc = set_actual_buffer_count(inst, ctrl->val,
-			buffer_type);
+		if (ctrl->val > bufreq->buffer_count_min_host) {
+			dprintk(VIDC_DBG,
+				"Host buffer count changed from %d to %d\n",
+				bufreq->buffer_count_min_host,
+				ctrl->val);
+			bufreq->buffer_count_min_host = ctrl->val;
+		} else {
+			ctrl->val = bufreq->buffer_count_min_host;
+		}
+		rc = set_actual_buffer_count(inst,
+				bufreq->buffer_count_min_host,
+			HAL_BUFFER_OUTPUT);
 
 		return rc;
 	}
@@ -1816,7 +1862,6 @@
 	 * lower level code that attempts to do g_ctrl() will end up deadlocking
 	 * us.
 	 */
-	v4l2_ctrl_unlock(ctrl);
 
 	switch (ctrl->id) {
 
@@ -1837,7 +1882,7 @@
 
 	case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:
 	case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
-		rc = msm_vdec_get_count(inst, ctrl);
+		rc = msm_vidc_get_count(inst, ctrl);
 		break;
 	default:
 		/*
@@ -1845,8 +1890,7 @@
 		 * modify ctrl->value
 		 */
 		break;
-	}
-	v4l2_ctrl_lock(ctrl);
+	}
 
 	return rc;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
index 70427d3..1c78a45 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.c
@@ -422,19 +422,29 @@
 
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst)
 {
+	bool force_disable = false;
+
 	if (!inst) {
-		dprintk(VIDC_ERR, "%s: Invalid args: %p\n", __func__, inst);
+		dprintk(VIDC_ERR, "%s: Invalid args: %pK\n", __func__, inst);
 		return -EINVAL;
 	}
-	if (inst->flags & VIDC_THUMBNAIL) {
+
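+	/* Per-session-type DCVS enable knobs */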
+	force_disable = inst->session_type == MSM_VIDC_ENCODER ?
+		!msm_vidc_enc_dcvs_mode :
+		!msm_vidc_dec_dcvs_mode;
+
+	if (force_disable || inst->flags & VIDC_THUMBNAIL) {
-		dprintk(VIDC_PROF, "Thumbnail sessions don't need DCVS : %pK\n",
-			inst);
+		dprintk(VIDC_PROF, "DCVS disabled for this session : %pK\n",
+			inst);
+		inst->dcvs.extra_capture_buffer_count = 0;
+		inst->dcvs.extra_output_buffer_count = 0;
 		return false;
 	}
 	inst->dcvs_mode = true;
 
 	// TODO : Update with proper number based on on-target tuning.
-	inst->dcvs.extra_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	inst->dcvs.extra_capture_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
+	inst->dcvs.extra_output_buffer_count = DCVS_DEC_EXTRA_OUTPUT_BUFFERS;
 	return true;
 }
 
@@ -566,14 +576,17 @@
 	msm_dcvs_print_dcvs_stats(dcvs);
 }
 
-int msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst)
+int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
+	enum hal_buffer buffer_type)
 {
 	if (!inst) {
 		dprintk(VIDC_ERR, "%s Invalid args\n", __func__);
 		return 0;
 	}
 
-	return inst->dcvs.extra_buffer_count;
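+	/*
+	 * The V4L2 OUTPUT port feeds HAL_BUFFER_INPUT, so the output and
+	 * capture counters below are intentionally crossed.
+	 */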
+	return buffer_type == HAL_BUFFER_INPUT ?
+		inst->dcvs.extra_output_buffer_count :
+		inst->dcvs.extra_capture_buffer_count;
 }
 
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
index 0229ccbb..79fd8f6 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_clocks.h
@@ -32,7 +32,8 @@
 #define DCVS_BUFFER_SAFEGUARD (DCVS_DEC_EXTRA_OUTPUT_BUFFERS - 1)
 
 void msm_dcvs_init(struct msm_vidc_inst *inst);
-int  msm_dcvs_get_extra_buff_count(struct msm_vidc_inst *inst);
+int msm_vidc_get_extra_buff_count(struct msm_vidc_inst *inst,
+	enum hal_buffer buffer_type);
 int msm_dcvs_try_enable(struct msm_vidc_inst *inst);
 int msm_comm_scale_clocks_and_bus(struct msm_vidc_inst *inst);
 int msm_comm_init_clocks_and_bus_data(struct msm_vidc_inst *inst);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 5e49f42..f002e76 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -23,20 +23,9 @@
 #include "msm_vidc_debug.h"
 #include "msm_vidc_clocks.h"
 
-#define IS_ALREADY_IN_STATE(__p, __d) ({\
-	int __rc = (__p >= __d);\
-	__rc; \
-})
-
-#define SUM_ARRAY(__arr, __start, __end) ({\
-		int __index;\
-		typeof((__arr)[0]) __sum = 0;\
-		for (__index = (__start); __index <= (__end); __index++) {\
-			if (__index >= 0 && __index < ARRAY_SIZE(__arr))\
-				__sum += __arr[__index];\
-		} \
-		__sum;\
-})
+#define IS_ALREADY_IN_STATE(__p, __d) ((__p) >= (__d))
 
 #define V4L2_EVENT_SEQ_CHANGED_SUFFICIENT \
 		V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT
@@ -129,7 +118,7 @@
 	};
 
 	rc = msm_comm_g_ctrl(inst, &ctrl);
-	return rc ?: ctrl.value;
+	return rc ? rc : ctrl.value;
 }
 
 static struct v4l2_ctrl **get_super_cluster(struct msm_vidc_inst *inst,
@@ -874,11 +863,13 @@
 
 	/* This should come from sys_init_done */
 	core->resources.max_inst_count =
-		sys_init_msg->max_sessions_supported ? :
+		sys_init_msg->max_sessions_supported ?
+		sys_init_msg->max_sessions_supported :
 		MAX_SUPPORTED_INSTANCES;
 
 	core->resources.max_secure_inst_count =
-		core->resources.max_secure_inst_count ? :
+		core->resources.max_secure_inst_count ?
+		core->resources.max_secure_inst_count :
 		core->resources.max_inst_count;
 
 	if (core->id == MSM_VIDC_CORE_VENUS &&
@@ -1185,6 +1176,9 @@
 			&inst->capability.hier_p);
 	msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE,
 			&inst->capability.bitrate);
+	msm_vidc_comm_update_ctrl(inst,
+			V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE,
+			&inst->capability.bitrate);
 	msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
 			&inst->capability.peakbitrate);
 	msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP,
@@ -3850,17 +3844,17 @@
 	 * Don't queue if:
 	 * 1) Hardware isn't ready (that's simple)
 	 */
-	defer = defer ?: inst->state != MSM_VIDC_START_DONE;
+	defer = defer ? defer : (inst->state != MSM_VIDC_START_DONE);
 
 	/*
 	 * 2) The client explicitly tells us not to because it wants this
 	 * buffer to be batched with future frames.  The batch size (on both
 	 * capabilities) is completely determined by the client.
 	 */
-	defer = defer ?: vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER;
+	defer = defer ? defer : (vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER);
 
 	/* 3) If we're in batch mode, we must have full batches of both types */
-	defer = defer ?: batch_mode && (!output_count || !capture_count);
+	defer = defer ? defer :
+		(batch_mode && (!output_count || !capture_count));
 
 	if (defer) {
 		dprintk(VIDC_DBG, "Deferring queue of %pK\n", vb);
@@ -3998,32 +3992,89 @@
 	return rc;
 }
 
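+/*
+ * Pad the firmware's minimum buffer counts with the extra buffers DCVS
+ * wants on each port, and cache the result in buffer_count_min_host.
+ */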
+static int msm_vidc_update_host_buff_counts(struct msm_vidc_inst *inst)
+{
+	int extra_buffers, buffer_type;
+	struct hal_buffer_requirements *bufreq;
+
+	bufreq = get_buff_req_buffer(inst,
+		HAL_BUFFER_INPUT);
+	if (!bufreq) {
+		dprintk(VIDC_ERR,
+			"Failed : No buffer requirements : %x\n",
+			HAL_BUFFER_INPUT);
+		return -EINVAL;
+	}
+	extra_buffers = msm_vidc_get_extra_buff_count(inst, HAL_BUFFER_INPUT);
+
+	bufreq->buffer_count_min_host = bufreq->buffer_count_min +
+		extra_buffers;
+
+	buffer_type = msm_comm_get_hal_output_buffer(inst);
+	bufreq = get_buff_req_buffer(inst,
+		buffer_type);
+	if (!bufreq) {
+		dprintk(VIDC_ERR,
+			"Failed : No buffer requirements : %x\n",
+			buffer_type);
+		return -EINVAL;
+	}
+
+	extra_buffers = msm_vidc_get_extra_buff_count(inst, buffer_type);
+
+	bufreq->buffer_count_min_host = bufreq->buffer_count_min +
+		extra_buffers;
+
+	return 0;
+}
+
 int msm_comm_try_get_bufreqs(struct msm_vidc_inst *inst)
 {
 	int rc = 0, i = 0;
 	union hal_get_property hprop;
 
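+	/* hprop is a union of every property payload; clear stale contents */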
+	memset(&hprop, 0x0, sizeof(hprop));
+
 	rc = msm_comm_try_get_prop(inst, HAL_PARAM_GET_BUFFER_REQUIREMENTS,
-					&hprop);
+		&hprop);
 	if (rc) {
 		dprintk(VIDC_ERR, "Failed getting buffer requirements: %d", rc);
 		return rc;
 	}
 
-	dprintk(VIDC_DBG, "Buffer requirements:\n");
-	dprintk(VIDC_DBG, "%15s %8s %8s\n", "buffer type", "count", "size");
+	dprintk(VIDC_DBG, "Buffer requirements from HW:\n");
+	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
+		"buffer type", "count", "mincount_host", "mincount_fw", "size");
 	for (i = 0; i < HAL_BUFFER_MAX; i++) {
 		struct hal_buffer_requirements req = hprop.buf_req.buffer[i];
 
 		inst->buff_req.buffer[i] = req;
-		dprintk(VIDC_DBG, "%15s %8d %8d\n",
+		if (req.buffer_type != HAL_BUFFER_NONE) {
+			dprintk(VIDC_DBG, "%15s %8d %8d %8d %8d\n",
 				get_buffer_name(req.buffer_type),
-				req.buffer_count_actual, req.buffer_size);
+				req.buffer_count_actual,
+				req.buffer_count_min_host,
+				req.buffer_count_min, req.buffer_size);
+		}
 	}
 
-	dprintk(VIDC_PROF, "Input buffers: %d, Output buffers: %d\n",
-			inst->buff_req.buffer[0].buffer_count_actual,
-			inst->buff_req.buffer[1].buffer_count_actual);
+	rc = msm_vidc_update_host_buff_counts(inst);
+
+	dprintk(VIDC_DBG, "Buffer requirements host adjusted:\n");
+	dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
+		"buffer type", "count", "mincount_host", "mincount_fw", "size");
+	for (i = 0; i < HAL_BUFFER_MAX; i++) {
+		struct hal_buffer_requirements req = inst->buff_req.buffer[i];
+
+		if (req.buffer_type != HAL_BUFFER_NONE) {
+			dprintk(VIDC_DBG, "%15s %8d %8d %8d %8d\n",
+				get_buffer_name(req.buffer_type),
+				req.buffer_count_actual,
+				req.buffer_count_min_host,
+				req.buffer_count_min, req.buffer_size);
+		}
+	}
 	return rc;
 }
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_internal.h b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
index 8562e8f..4b91193 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_internal.h
+++ b/drivers/media/platform/msm/vidc/msm_vidc_internal.h
@@ -212,7 +212,8 @@
 	int load_high;
 	int min_threshold;
 	int max_threshold;
-	unsigned int extra_buffer_count;
+	unsigned int extra_capture_buffer_count;
+	unsigned int extra_output_buffer_count;
 	enum hal_buffer buffer_type;
 };
 
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 8b9018c..763c41d 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -499,7 +499,7 @@
 				"qcom,vpp-cycles-per-mb", NULL)) {
 			rc = of_property_read_u32(child_node,
 					"qcom,vpp-cycles-per-mb",
-					&entry->vsp_cycles);
+					&entry->vpp_cycles);
 			if (rc) {
 				dprintk(VIDC_ERR,
 					"qcom,vpp-cycles-per-mb not found\n");
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 2a833dc..48a6f17 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -588,6 +588,7 @@
 struct hfi_frame_cr_stats_type {
 	u32 frame_index;
 	struct hfi_ubwc_cr_stats_info_type ubwc_stats_info;
+	u32 complexity_number;
 };
 
 struct hfi_msg_session_empty_buffer_done_packet {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 8aa0bbb..28bb7ab 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -20,17 +20,15 @@
 #include <media/msm_vidc.h>
 #include "msm_vidc_resources.h"
 
-#define CONTAINS(__a, __sz, __t) ({\
-	int __rc = __t >= __a && \
-			__t < __a + __sz; \
-	__rc; \
-})
+#define CONTAINS(__a, __sz, __t) \
+	(((__t) >= (__a)) && ((__t) < (__a) + (__sz)))
 
-#define OVERLAPS(__t, __tsz, __a, __asz) ({\
-	int __rc = __t <= __a && \
-			__t + __tsz >= __a + __asz; \
-	__rc; \
-})
+#define OVERLAPS(__t, __tsz, __a, __asz) \
+	(((__t) <= (__a)) && ((__t) + (__tsz) >= (__a) + (__asz)))
 
 #define HAL_BUFFERFLAG_EOS              0x00000001
 #define HAL_BUFFERFLAG_STARTTIME        0x00000002
@@ -191,7 +189,6 @@
 	HAL_CONFIG_VENC_MAX_BITRATE,
 	HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
 	HAL_PARAM_VENC_GENERATE_AUDNAL,
-	HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
 	HAL_PARAM_BUFFER_ALLOC_MODE,
 	HAL_PARAM_VDEC_FRAME_ASSEMBLY,
 	HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY,
@@ -818,10 +815,6 @@
 	u32 time_scale;
 };
 
-struct hal_h264_vui_bitstream_restrc {
-	u32 enable;
-};
-
 struct hal_preserve_text_quality {
 	u32 enable;
 };
@@ -1018,7 +1011,6 @@
 	struct hal_multi_view_select multi_view_select;
 	struct hal_timestamp_scale timestamp_scale;
 	struct hal_h264_vui_timing_info h264_vui_timing_info;
-	struct hal_h264_vui_bitstream_restrc h264_vui_bitstream_restrc;
 	struct hal_preserve_text_quality preserve_text_quality;
 	struct hal_buffer_info buffer_info;
 	struct hal_buffer_alloc_mode buffer_alloc_mode;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 0d73410..bc7e8bd 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -276,8 +276,6 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
 #define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
-#define  HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
 #define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
 #define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
@@ -465,10 +463,6 @@
 	u32 flip;
 };
 
-struct hfi_max_num_b_frames {
-	u32 max_num_b_frames;
-};
-
 struct hfi_conceal_color {
 	u32 conceal_color;
 };
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d593315..0ac1cf7 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -479,6 +479,14 @@
 	  the genalloc API. It is supposed to be used for small on-chip SRAM
 	  areas found on many SoCs.
 
+config QSEECOM
+	tristate "QTI Secure Execution Communicator driver"
+	help
+	  Provides a communication interface between userspace and
+	  QTI Secure Execution Environment (QSEE) using Secure Channel
+	  Manager (SCM) interface. It exposes APIs for both userspace and
+	  kernel clients.
+
 config VEXPRESS_SYSCFG
 	bool "Versatile Express System Configuration driver"
 	depends on VEXPRESS_CONFIG
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index dd12e9a..e1c6ae1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,6 +49,7 @@
 obj-$(CONFIG_SRAM)		+= sram.o
 obj-y				+= mic/
 obj-$(CONFIG_GENWQE)		+= genwqe/
+obj-$(CONFIG_QSEECOM)		+= qseecom.o
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
diff --git a/drivers/misc/compat_qseecom.c b/drivers/misc/compat_qseecom.c
new file mode 100644
index 0000000..96d200f
--- /dev/null
+++ b/drivers/misc/compat_qseecom.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/qseecom.h>
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+static int compat_get_qseecom_register_listener_req(
+		struct compat_qseecom_register_listener_req __user *data32,
+		struct qseecom_register_listener_req __user *data)
+{
+	int err;
+	compat_ulong_t listener_id;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_size;
+
+	err = get_user(listener_id, &data32->listener_id);
+	err |= put_user(listener_id, &data->listener_id);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	/* upper bits won't get set, zero them */
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+
+	err |= get_user(sb_size, &data32->sb_size);
+	err |= put_user(sb_size, &data->sb_size);
+	return err;
+}
+
+static int compat_get_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_uint_t app_id;
+
+	err = get_user(mdt_len, &data32->mdt_len);
+	err |= put_user(mdt_len, &data->mdt_len);
+	err |= get_user(img_len, &data32->img_len);
+	err |= put_user(img_len, &data->img_len);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= copy_in_user(data->img_name, data32->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	return err;
+}
+
+static int compat_get_qseecom_send_cmd_req(
+		struct compat_qseecom_send_cmd_req __user *data32,
+		struct qseecom_send_cmd_req __user *data)
+{
+	int err;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
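+	/* upper bits won't get set, zero them */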
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_cmd_req(
+		struct compat_qseecom_send_modfd_cmd_req __user *data32,
+		struct qseecom_send_modfd_cmd_req __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+static int compat_get_qseecom_set_sb_mem_param_req(
+		struct compat_qseecom_set_sb_mem_param_req __user *data32,
+		struct qseecom_set_sb_mem_param_req __user *data)
+{
+	int err;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_len;
+
+	err = get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+	err |= get_user(sb_len, &data32->sb_len);
+	err |= put_user(sb_len, &data->sb_len);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	int err;
+	compat_uint_t qseos_version;
+
+	err = get_user(qseos_version, &data32->qseos_version);
+	err |= put_user(qseos_version, &data->qseos_version);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_uint_t app_id;
+	char app_name;
+	compat_ulong_t app_arch;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data32->app_name[i]));
+		err |= put_user(app_name, &(data->app_name[i]));
+	}
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	return err;
+}
+
+static int compat_get_qseecom_send_svc_cmd_req(
+		struct compat_qseecom_send_svc_cmd_req __user *data32,
+		struct qseecom_send_svc_cmd_req __user *data)
+{
+	int err;
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_id, &data32->cmd_id);
+	err |= put_user(cmd_id, &data->cmd_id);
+	err |= get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_create_key_req(
+		struct compat_qseecom_create_key_req __user *data32,
+		struct qseecom_create_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->hash32, data32->hash32, QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_wipe_key_req(
+		struct compat_qseecom_wipe_key_req __user *data32,
+		struct qseecom_wipe_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+	compat_int_t wipe_key_flag;
+
+	err = get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+	err |= get_user(wipe_key_flag, &data32->wipe_key_flag);
+	err |= put_user(wipe_key_flag, &data->wipe_key_flag);
+
+	return err;
+}
+
+static int compat_get_qseecom_update_key_userinfo_req(
+		struct compat_qseecom_update_key_userinfo_req __user *data32,
+		struct qseecom_update_key_userinfo_req __user *data)
+{
+	int err = 0;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->current_hash32, data32->current_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= copy_in_user(data->new_hash32, data32->new_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_save_partition_hash_req(
+		struct compat_qseecom_save_partition_hash_req __user *data32,
+		struct qseecom_save_partition_hash_req __user *data)
+{
+	int err;
+	compat_int_t partition_id;
+
+	err = get_user(partition_id, &data32->partition_id);
+	err |= put_user(partition_id, &data->partition_id);
+	err |= copy_in_user(data->digest, data32->digest,
+				SHA256_DIGEST_LENGTH);
+	return err;
+}
+
+static int compat_get_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data32->is_activated);
+	err |= put_user(is_activated, &data->is_activated);
+	return err;
+}
+
+static int compat_get_qseecom_mdtp_cipher_dip_req(
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32,
+		struct qseecom_mdtp_cipher_dip_req __user *data)
+{
+	int err;
+	compat_int_t in_buf_size;
+	compat_uptr_t in_buf;
+	compat_int_t out_buf_size;
+	compat_uptr_t out_buf;
+	compat_int_t direction;
+
+	err = get_user(in_buf_size, &data32->in_buf_size);
+	err |= put_user(in_buf_size, &data->in_buf_size);
+	err |= get_user(out_buf_size, &data32->out_buf_size);
+	err |= put_user(out_buf_size, &data->out_buf_size);
+	err |= get_user(direction, &data32->direction);
+	err |= put_user(direction, &data->direction);
+	err |= get_user(in_buf, &data32->in_buf);
+	err |= put_user(NULL, &data->in_buf);
+	err |= put_user(in_buf, (compat_uptr_t *)&data->in_buf);
+	err |= get_user(out_buf, &data32->out_buf);
+	err |= put_user(NULL, &data->out_buf);
+	err |= put_user(out_buf, (compat_uptr_t *)&data->out_buf);
+
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_listener_resp(
+		struct compat_qseecom_send_modfd_listener_resp __user *data32,
+		struct qseecom_send_modfd_listener_resp __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t resp_buf_ptr;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(resp_buf_ptr, &data32->resp_buf_ptr);
+	err |= put_user(NULL, &data->resp_buf_ptr);
+	err |= put_user(resp_buf_ptr, (compat_uptr_t *)&data->resp_buf_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+
+static int compat_get_qseecom_qteec_req(
+		struct compat_qseecom_qteec_req __user *data32,
+		struct qseecom_qteec_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	int err;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_qteec_modfd_req(
+		struct compat_qseecom_qteec_modfd_req __user *data32,
+		struct qseecom_qteec_modfd_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+	int err, i;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
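+/* Copy a plain int argument from the compat layout to the native one */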
+static int compat_get_int(compat_int_t __user *data32,
+		int __user *data)
+{
+	compat_int_t x;
+	int err;
+
+	err = get_user(x, data32);
+	err |= put_user(x, data);
+	return err;
+}
+
+static int compat_put_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_int_t app_id;
+
+	err = get_user(mdt_len, &data->mdt_len);
+	err |= put_user(mdt_len, &data32->mdt_len);
+	err |= get_user(img_len, &data->img_len);
+	err |= put_user(img_len, &data32->img_len);
+	err |= get_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= copy_in_user(data32->img_name, data->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	compat_uint_t qseos_version;
+	int err;
+
+	err = get_user(qseos_version, &data->qseos_version);
+	err |= put_user(qseos_version, &data32->qseos_version);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_int_t app_id;
+	compat_ulong_t app_arch;
+	char app_name;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data->app_name[i]));
+		err |= put_user(app_name, &(data32->app_name[i]));
+	}
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+
+	return err;
+}
+
+static int compat_put_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data->is_activated);
+	err |= put_user(is_activated, &data32->is_activated);
+	return err;
+}
+
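+/* Translate a compat (32-bit) ioctl command code to its native equivalent */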
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_REGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ:
+		return QSEECOM_IOCTL_LOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+		return QSEECOM_IOCTL_RECEIVE_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+		return QSEECOM_IOCTL_SEND_RESP_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+		return QSEECOM_IOCTL_UNLOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+		return QSEECOM_IOCTL_PERF_ENABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+		return QSEECOM_IOCTL_PERF_DISABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ:
+		return QSEECOM_IOCTL_SET_BUS_SCALING_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ:
+		return QSEECOM_IOCTL_SET_MEM_PARAM_REQ;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ:
+		return QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ:
+		return QSEECOM_IOCTL_APP_LOADED_QUERY_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ:
+		return QSEECOM_IOCTL_CREATE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ:
+		return QSEECOM_IOCTL_WIPE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ:
+		return QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ:
+		return QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ:
+		return QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+		return QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ:
+		return QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ:
+		return QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP_64;
+
+	default:
+		return cmd;
+	}
+}
+
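+/*
+ * 32-bit ioctl entry point.  Commands that carry pointers have their compat
+ * layout repacked into a native struct on the user stack (via
+ * compat_alloc_user_space()) before dispatch to qseecom_ioctl(); any out
+ * parameters are copied back the same way.
+ */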
+long compat_qseecom_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	switch (cmd) {
+
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		return qseecom_ioctl(file, convert_cmd(cmd), 0);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		struct compat_qseecom_register_listener_req __user *data32;
+		struct qseecom_register_listener_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_register_listener_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_load_img_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ: {
+		struct compat_qseecom_send_cmd_req __user *data32;
+		struct qseecom_send_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		struct compat_qseecom_send_modfd_cmd_req __user *data32;
+		struct qseecom_send_modfd_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		struct compat_qseecom_set_sb_mem_param_req __user *data32;
+		struct qseecom_set_sb_mem_param_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_set_sb_mem_param_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		struct compat_qseecom_qseos_version_req __user *data32;
+		struct qseecom_qseos_version_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_version_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_qseos_version_req(data32, data);
+
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		compat_int_t __user *data32;
+		int __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+		err = compat_get_int(data32, data);
+		if (err)
+			return err;
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		struct compat_qseecom_qseos_app_load_query __user *data32;
+		struct qseecom_qseos_app_load_query __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_app_load_query(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+					(unsigned long)data);
+		err = compat_put_qseecom_qseos_app_load_query(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		struct compat_qseecom_send_svc_cmd_req __user *data32;
+		struct qseecom_send_svc_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_svc_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		struct compat_qseecom_create_key_req __user *data32;
+		struct qseecom_create_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_create_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		struct compat_qseecom_wipe_key_req __user *data32;
+		struct qseecom_wipe_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_wipe_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		struct compat_qseecom_update_key_userinfo_req __user *data32;
+		struct qseecom_update_key_userinfo_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_update_key_userinfo_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		struct compat_qseecom_save_partition_hash_req __user *data32;
+		struct qseecom_save_partition_hash_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_save_partition_hash_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		struct compat_qseecom_is_es_activated_req __user *data32;
+		struct qseecom_is_es_activated_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_is_es_activated_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_is_es_activated_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32;
+		struct qseecom_mdtp_cipher_dip_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_mdtp_cipher_dip_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		struct compat_qseecom_send_modfd_listener_resp __user *data32;
+		struct qseecom_send_modfd_listener_resp __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_listener_resp(data32,
+								data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		struct compat_qseecom_qteec_req __user *data32;
+		struct qseecom_qteec_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		struct compat_qseecom_qteec_modfd_req __user *data32;
+		struct qseecom_qteec_modfd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_modfd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	default:
+		return -ENOIOCTLCMD;
+	break;
+	}
+	return 0;
+}
diff --git a/drivers/misc/compat_qseecom.h b/drivers/misc/compat_qseecom.h
new file mode 100644
index 0000000..fa76d4c
--- /dev/null
+++ b/drivers/misc/compat_qseecom.h
@@ -0,0 +1,333 @@
+#ifndef _UAPI_COMPAT_QSEECOM_H_
+#define _UAPI_COMPAT_QSEECOM_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/*
+ * struct compat_qseecom_register_listener_req -
+ *      for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct compat_qseecom_register_listener_req {
+	compat_ulong_t listener_id; /* in */
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_size; /* in */
+};
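+/*
+ * Illustrative 32-bit userspace usage (ion_fd, sb_va and qseecom_fd are
+ * placeholders, not defined here):
+ *
+ *	struct qseecom_register_listener_req req = {
+ *		.listener_id = 0x1234,
+ *		.ifd_data_fd = ion_fd,
+ *		.virt_sb_base = sb_va,
+ *		.sb_size = 4096,
+ *	};
+ *	ioctl(qseecom_fd, QSEECOM_IOCTL_REGISTER_LISTENER_REQ, &req);
+ *
+ * This compat layer repacks that layout before it reaches qseecom_ioctl().
+ */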
+
+/*
+ * struct compat_qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_buf - command buffer
+ * @cmd_req_len - command buffer length
+ * @resp_buf - response buffer
+ * @resp_len - response buffer length
+ */
+struct compat_qseecom_send_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+/*
+ * struct compat_qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct compat_qseecom_ion_fd_info {
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+};
+/*
+ * struct compat_qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data_fd - ion handle to memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct compat_qseecom_send_modfd_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct compat_qseecom_send_resp_req -
+ * signal to continue the send_cmd req.
+ * Used as a trigger from an HLOS service to notify QSEECOM that it is done
+ * with its operation, and to provide the response so QSEECOM can continue
+ * the incomplete command's execution.
+ * @resp_len - Length of the response
+ * @resp_buf - Response buffer where the response of the cmd should go.
+ */
+struct compat_qseecom_send_resp_req {
+	compat_uptr_t resp_buf; /* in */
+	compat_uint_t resp_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_load_img_req -
+ * for sending image length information and the ion file descriptor to the
+ * qseecom driver. The ion file descriptor is used to retrieve the ion file
+ * handle and, in turn, the physical address of the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 + .. + .bxx image files in bytes.
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ * @app_arch - Architecture (32-bit or 64-bit) of the app image.
+ * @app_id - Application id returned by the driver (out).
+ */
+struct compat_qseecom_load_img_req {
+	compat_ulong_t mdt_len; /* in */
+	compat_ulong_t img_len; /* in */
+	compat_long_t  ifd_data_fd; /* in */
+	char	 img_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_ulong_t app_arch; /* in */
+	compat_uint_t app_id; /* out */
+};
+
+struct compat_qseecom_set_sb_mem_param_req {
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct compat_qseecom_qseos_version_req {
+	compat_uint_t qseos_version; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name - name of the app
+ * @app_id - app id
+ * @app_arch - architecture of the app
+ */
+struct compat_qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_uint_t app_id; /* out */
+	compat_ulong_t app_arch;
+};
+
+struct compat_qseecom_send_svc_cmd_req {
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+struct compat_qseecom_create_key_req {
+	unsigned char hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+struct compat_qseecom_wipe_key_req {
+	enum qseecom_key_management_usage_type usage;
+	compat_int_t wipe_key_flag;
+};
+
+struct compat_qseecom_update_key_userinfo_req {
+	unsigned char current_hash32[QSEECOM_HASH_SIZE];
+	unsigned char new_hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+/*
+ * struct compat_qseecom_save_partition_hash_req
+ * @partition_id - partition id
+ * @digest[SHA256_DIGEST_LENGTH] - sha256 digest
+ */
+struct compat_qseecom_save_partition_hash_req {
+	compat_int_t partition_id; /* in */
+	char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct compat_qseecom_is_es_activated_req
+ * @is_activated - 1=true, 0=false
+ */
+struct compat_qseecom_is_es_activated_req {
+	compat_int_t is_activated; /* out */
+};
+
+/*
+ * struct compat_qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct compat_qseecom_mdtp_cipher_dip_req {
+	compat_uptr_t in_buf;
+	compat_uint_t in_buf_size;
+	compat_uptr_t out_buf;
+	compat_uint_t out_buf_size;
+	compat_uint_t direction;
+};
+
+/*
+ * struct compat_qseecom_send_modfd_listener_resp - for send listener
+ * response ioctl request
+ * @resp_buf_ptr - response buffer
+ * @resp_len - response buffer length
+ * @ifd_data - ion handles to memory allocated in user space, with the
+ * command buffer offset at which each is referenced
+ */
+struct compat_qseecom_send_modfd_listener_resp {
+	compat_uptr_t resp_buf_ptr; /* in */
+	compat_uint_t resp_len; /* in */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct compat_qseecom_qteec_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+};
+
+struct compat_qseecom_qteec_modfd_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+struct compat_qseecom_ce_pipe_entry {
+	compat_int_t valid;
+	compat_uint_t ce_num;
+	compat_uint_t ce_pipe_pair;
+};
+
+struct compat_qseecom_ce_info_req {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	compat_uint_t usage;
+	compat_uint_t unit_num;
+	compat_uint_t num_ce_pipe_entries;
+	struct compat_qseecom_ce_pipe_entry
+				ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+struct file;
+extern long compat_qseecom_ioctl(struct file *file,
+					unsigned int cmd, unsigned long arg);
+
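+/*
+ * A minimal sketch (not part of this header) of how the handler above is
+ * typically wired in, so that 32-bit processes on a 64-bit kernel are
+ * routed through the compat conversion path:
+ *
+ *	static const struct file_operations qseecom_fops = {
+ *		.owner = THIS_MODULE,
+ *		.unlocked_ioctl = qseecom_ioctl,
+ *		.compat_ioctl = compat_qseecom_ioctl,
+ *	};
+ */
+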
+#define COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 1, struct compat_qseecom_register_listener_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 2)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 3, struct compat_qseecom_send_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 4, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_RECEIVE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 5)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 6)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 7, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 8, struct compat_qseecom_set_sb_mem_param_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 9)
+
+#define COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 10, struct compat_qseecom_qseos_version_req)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 11)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 12)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 13, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 14)
+
+#define COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct compat_qseecom_qseos_app_load_query)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 16, struct compat_qseecom_send_svc_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 17, struct compat_qseecom_create_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 18, struct compat_qseecom_wipe_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 19, \
+				struct compat_qseecom_save_partition_hash_req)
+
+#define COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 20, struct compat_qseecom_is_es_activated_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP \
+	_IOWR(QSEECOM_IOC_MAGIC, 21, \
+				struct compat_qseecom_send_modfd_listener_resp)
+
+#define COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 23, int)
+
+#define COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 24, \
+			struct compat_qseecom_update_key_userinfo_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 30, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 31, struct compat_qseecom_qteec_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 32, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 33, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 34, struct compat_qseecom_mdtp_cipher_dip_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 35, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+	_IOWR(QSEECOM_IOC_MAGIC, 36, \
+				struct compat_qseecom_send_modfd_listener_resp)
+#define COMPAT_QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 40, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 41, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 42, \
+				struct compat_qseecom_ce_info_req)
+
+#endif /* IS_ENABLED(CONFIG_COMPAT) */
+#endif /* _UAPI_COMPAT_QSEECOM_H_ */
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
new file mode 100644
index 0000000..2c02d2d
--- /dev/null
+++ b/drivers/misc/qseecom.c
@@ -0,0 +1,8926 @@
+/*
+ * QTI Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/qseecom.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/freezer.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/qseecomi.h>
+#include <asm/cacheflush.h>
+#include "qseecom_kernel.h"
+#include <crypto/ice.h>
+#include <linux/delay.h>
+
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+#define QSEECOM_DEV			"qseecom"
+#define QSEOS_VERSION_14		0x14
+#define QSEE_VERSION_00			0x400000
+#define QSEE_VERSION_01			0x401000
+#define QSEE_VERSION_02			0x402000
+#define QSEE_VERSION_03			0x403000
+#define QSEE_VERSION_04			0x404000
+#define QSEE_VERSION_05			0x405000
+#define QSEE_VERSION_20			0x800000
+#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */
+
+#define QSEE_CE_CLK_100MHZ		100000000
+#define CE_CLK_DIV			1000000
+
+#define QSEECOM_MAX_SG_ENTRY			512
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
+			(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
+#define QSEECOM_INVALID_KEY_ID  0xff
+
+/* Save partition image hash for authentication check */
+#define	SCM_SAVE_PARTITION_HASH_ID	0x01
+
+/* Check if enterprise security is activated */
+#define	SCM_IS_ACTIVATED_ID		0x02
+
+/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
+#define SCM_MDTP_CIPHER_DIP		0x01
+
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP			0x20000
+
+#define RPMB_SERVICE			0x2000
+#define SSD_SERVICE			0x3000
+
+#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
+#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
+#define TWO 2
+#define QSEECOM_UFS_ICE_CE_NUM 10
+#define QSEECOM_SDCC_ICE_CE_NUM 20
+#define QSEECOM_ICE_FDE_KEY_INDEX 0
+
+#define PHY_ADDR_4G	(1ULL<<32)	/* first address above the 4 GB boundary */
+
+#define QSEECOM_STATE_NOT_READY         0
+#define QSEECOM_STATE_SUSPEND           1
+#define QSEECOM_STATE_READY             2
+#define QSEECOM_ICE_FDE_KEY_SIZE_MASK   2
+
+/*
+ * Default ce info unit is 0 for services which support only a single
+ * instance. Most services are in this category.
+ */
+#define DEFAULT_CE_INFO_UNIT 0
+#define DEFAULT_NUM_CE_INFO_UNIT 1
+
+enum qseecom_clk_definitions {
+	CLK_DFAB = 0,
+	CLK_SFPB,
+};
+
+enum qseecom_ice_key_size_type {
+	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
+		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
+		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
+		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+};
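+
+/*
+ * Note: despite its _MASK name, QSEECOM_ICE_FDE_KEY_SIZE_MASK (2) acts as
+ * a bit position above, so the resulting flag values are:
+ * 16-byte key: 0 << 2 = 0x0, 32-byte key: 1 << 2 = 0x4,
+ * undefined: 0xF << 2 = 0x3C.
+ */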
+
+enum qseecom_client_handle_type {
+	QSEECOM_CLIENT_APP = 1,
+	QSEECOM_LISTENER_SERVICE,
+	QSEECOM_SECURE_SERVICE,
+	QSEECOM_GENERIC,
+	QSEECOM_UNAVAILABLE_CLIENT_APP,
+};
+
+enum qseecom_ce_hw_instance {
+	CLK_QSEE = 0,
+	CLK_CE_DRV,
+	CLK_INVALID,
+};
+
+static struct class *driver_class;
+static dev_t qseecom_device_no;
+
+static DEFINE_MUTEX(qsee_bw_mutex);
+static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
+
+struct sglist_info {
+	uint32_t indexAndFlags;
+	uint32_t sizeOrCount;
+};
+
+/*
+ * Bit 31 indicates whether the request buffer holds one or multiple
+ * physical addresses. If it is set, the index locates a single physical
+ * addr inside the request buffer, and `sizeOrCount` is the size of the
+ * memory being shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * Bit 30 indicates a 64- or 32-bit address; when it is set, physical addr
+ * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit
+ * values.
+ *
+ * Bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
+	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
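+
+/*
+ * For example, SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x10) yields 0xC0000010:
+ * a single 64-bit physical address located at offset 0x10 within the
+ * request buffer.
+ */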
+
+#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)
+
+#define FEATURE_ID_WHITELIST	15	/* whitelist feature id */
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
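+
+/*
+ * For example, MAKE_WHITELIST_VERSION(1, 1, 0) packs major 1, minor 1,
+ * patch 0 as (1 << 22) | (1 << 12) | 0 = 0x401000.
+ */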
+
+struct qseecom_registered_listener_list {
+	struct list_head                 list;
+	struct qseecom_register_listener_req svc;
+	void  *user_virt_sb_base;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	size_t sb_length;
+	struct ion_handle *ihandle; /* Retrieve phy addr */
+	wait_queue_head_t          rcv_req_wq;
+	int                        rcv_req_flag;
+	int                        send_resp_flag;
+	bool                       listener_in_use;
+	/* wq for thread blocked on this listener */
+	wait_queue_head_t          listener_block_app_wq;
+	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	uint32_t sglist_cnt;
+};
+
+struct qseecom_registered_app_list {
+	struct list_head                 list;
+	u32  app_id;
+	u32  ref_cnt;
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	bool app_blocked;
+	u32  blocked_on_listener_id;
+};
+
+struct qseecom_registered_kclient_list {
+	struct list_head list;
+	struct qseecom_handle *handle;
+};
+
+struct qseecom_ce_info_use {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry *ce_pipe_entry;
+	bool alloc;
+	uint32_t type;
+};
+
+struct ce_hw_usage_info {
+	uint32_t qsee_ce_hw_instance;
+	uint32_t num_fde;
+	struct qseecom_ce_info_use *fde;
+	uint32_t num_pfe;
+	struct qseecom_ce_info_use *pfe;
+};
+
+struct qseecom_clk {
+	enum qseecom_ce_hw_instance instance;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+	struct clk *ce_bus_clk;
+	uint32_t clk_access_cnt;
+};
+
+struct qseecom_control {
+	struct ion_client *ion_clnt;		/* Ion client */
+	struct list_head  registered_listener_list_head;
+	spinlock_t        registered_listener_list_lock;
+
+	struct list_head  registered_app_list_head;
+	spinlock_t        registered_app_list_lock;
+
+	struct list_head   registered_kclient_list_head;
+	spinlock_t        registered_kclient_list_lock;
+
+	wait_queue_head_t send_resp_wq;
+	int               send_resp_flag;
+
+	uint32_t          qseos_version;
+	uint32_t          qsee_version;
+	struct device *pdev;
+	bool  whitelist_support;
+	bool  commonlib_loaded;
+	bool  commonlib64_loaded;
+	struct ion_handle *cmnlib_ion_handle;
+	struct ce_hw_usage_info ce_info;
+
+	int qsee_bw_count;
+	int qsee_sfpb_bw_count;
+
+	uint32_t qsee_perf_client;
+	struct qseecom_clk qsee;
+	struct qseecom_clk ce_drv;
+
+	bool support_bus_scaling;
+	bool support_fde;
+	bool support_pfe;
+	bool fde_key_size;
+	uint32_t  cumulative_mode;
+	enum qseecom_bandwidth_request_mode  current_mode;
+	struct timer_list bw_scale_down_timer;
+	struct work_struct bw_inactive_req_ws;
+	struct cdev cdev;
+	bool timer_running;
+	bool no_clock_support;
+	unsigned int ce_opp_freq_hz;
+	bool appsbl_qseecom_support;
+	uint32_t qsee_reentrancy_support;
+
+	uint32_t app_block_ref_cnt;
+	wait_queue_head_t app_block_wq;
+	atomic_t qseecom_state;
+	int is_apps_region_protected;
+};
+
+struct qseecom_sec_buf_fd_info {
+	bool is_sec_buf_fd;
+	size_t size;
+	void *vbase;
+	dma_addr_t pbase;
+};
+
+struct qseecom_param_memref {
+	uint32_t buffer;
+	uint32_t size;
+};
+
+struct qseecom_client_handle {
+	u32  app_id;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	unsigned long user_virt_sb_base;
+	size_t sb_length;
+	struct ion_handle *ihandle;		/* Retrieve phy addr */
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
+};
+
+struct qseecom_listener_handle {
+	u32               id;
+};
+
+static struct qseecom_control qseecom;
+
+struct qseecom_dev_handle {
+	enum qseecom_client_handle_type type;
+	union {
+		struct qseecom_client_handle client;
+		struct qseecom_listener_handle listener;
+	};
+	bool released;
+	int               abort;
+	wait_queue_head_t abort_wq;
+	atomic_t          ioctl_count;
+	bool  perf_enabled;
+	bool  fast_load_enabled;
+	enum qseecom_bandwidth_request_mode mode;
+	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	uint32_t sglist_cnt;
+	bool use_legacy_cmd;
+};
+
+struct qseecom_key_id_usage_desc {
+	uint8_t desc[QSEECOM_KEY_ID_SIZE];
+};
+
+struct qseecom_crypto_info {
+	unsigned int unit_num;
+	unsigned int ce;
+	unsigned int pipe_pair;
+};
+
+static struct qseecom_key_id_usage_desc key_id_array[] = {
+	{
+		.desc = "Undefined Usage Index",
+	},
+
+	{
+		.desc = "Full Disk Encryption",
+	},
+
+	{
+		.desc = "Per File Encryption",
+	},
+
+	{
+		.desc = "UFS ICE Full Disk Encryption",
+	},
+
+	{
+		.desc = "SDCC ICE Full Disk Encryption",
+	},
+};
+
+/* Function prototypes */
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name);
+static int qseecom_enable_ice_setup(int usage);
+static int qseecom_disable_ice_setup(int usage);
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+
+static int get_qseecom_keymaster_status(char *str)
+{
+	get_option(&str, &qseecom.is_apps_region_protected);
+	return 1;
+}
+__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
+
+static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
+			const void *req_buf, void *resp_buf)
+{
+	int      ret = 0;
+	uint32_t smc_id = 0;
+	uint32_t qseos_cmd_id = 0;
+	struct scm_desc desc = {0};
+	struct qseecom_command_scm_resp *scm_resp = NULL;
+
+	if (!req_buf || !resp_buf) {
+		pr_err("Invalid buffer pointer\n");
+		return -EINVAL;
+	}
+	qseos_cmd_id = *(uint32_t *)req_buf;
+	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
+
+	switch (svc_id) {
+	case 6: {
+		if (tz_cmd_id == 3) {
+			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+			desc.args[0] = *(uint32_t *)req_buf;
+		} else {
+			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
+				svc_id, tz_cmd_id);
+			return -EINVAL;
+		}
+		ret = scm_call2(smc_id, &desc);
+		break;
+	}
+	case SCM_SVC_ES: {
+		switch (tz_cmd_id) {
+		case SCM_SAVE_PARTITION_HASH_ID: {
+			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
+			struct qseecom_save_partition_hash_req *p_hash_req =
+				(struct qseecom_save_partition_hash_req *)
+				req_buf;
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, p_hash_req->digest,
+				SHA256_DIGEST_LENGTH);
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
+			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
+			desc.args[0] = p_hash_req->partition_id;
+			desc.args[1] = virt_to_phys(tzbuf);
+			desc.args[2] = SHA256_DIGEST_LENGTH;
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		default: {
+			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
+						tz_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (tz_cmd_id) */
+		break;
+	} /* end of case SCM_SVC_ES */
+	case SCM_SVC_TZSCHEDULER: {
+		switch (qseos_cmd_id) {
+		case QSEOS_APP_START_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_START_ID;
+			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+					(struct qseecom_load_app_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_SHUTDOWN_COMMAND: {
+			struct qseecom_unload_app_ireq *req;
+
+			req = (struct qseecom_unload_app_ireq *)req_buf;
+			smc_id = TZ_OS_APP_SHUTDOWN_ID;
+			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
+			desc.args[0] = req->app_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_LOOKUP_COMMAND: {
+			struct qseecom_check_app_ireq *req;
+			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			req = (struct qseecom_check_app_ireq *)req_buf;
+			pr_debug("Lookup app_name = %s\n", req->app_name);
+			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_APP_LOOKUP_ID;
+			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = strlen(req->app_name);
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_APP_REGION_NOTIFICATION: {
+			struct qsee_apps_region_info_ireq *req;
+			struct qsee_apps_region_info_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
+			desc.arginfo =
+				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qsee_apps_region_info_ireq *)
+					req_buf;
+				desc.args[0] = req->addr;
+				desc.args[1] = req->size;
+			} else {
+				req_64bit =
+				(struct qsee_apps_region_info_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->addr;
+				desc.args[1] = req_64bit->size;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
+			struct qseecom_load_lib_image_ireq *req;
+			struct qseecom_load_lib_image_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_lib_image_ireq *)
+					req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_lib_image_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_REGISTER_LISTENER: {
+			struct qseecom_register_listener_ireq *req;
+			struct qseecom_register_listener_64bit_ireq *req_64bit;
+
+			desc.arginfo =
+				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_register_listener_ireq *)
+					req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->sb_ptr;
+				desc.args[2] = req->sb_len;
+			} else {
+				req_64bit =
+				(struct qseecom_register_listener_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->listener_id;
+				desc.args[1] = req_64bit->sb_ptr;
+				desc.args[2] = req_64bit->sb_len;
+			}
+			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			if (ret) {
+				smc_id = TZ_OS_REGISTER_LISTENER_ID;
+				__qseecom_reentrancy_check_if_no_app_blocked(
+					smc_id);
+				ret = scm_call2(smc_id, &desc);
+			}
+			break;
+		}
+		case QSEOS_DEREGISTER_LISTENER: {
+			struct qseecom_unregister_listener_ireq *req;
+
+			req = (struct qseecom_unregister_listener_ireq *)
+				req_buf;
+			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
+			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
+			struct qseecom_client_listener_data_irsp *req;
+
+			req = (struct qseecom_client_listener_data_irsp *)
+				req_buf;
+			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
+			desc.arginfo =
+				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			desc.args[1] = req->status;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+			struct qseecom_client_listener_data_irsp *req;
+			struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+			smc_id =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req =
+				(struct qseecom_client_listener_data_irsp *)
+				req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->status;
+				desc.args[2] = req->sglistinfo_ptr;
+				desc.args[3] = req->sglistinfo_len;
+			} else {
+				req_64 =
+			(struct qseecom_client_listener_data_64bit_irsp *)
+				req_buf;
+				desc.args[0] = req_64->listener_id;
+				desc.args[1] = req_64->status;
+				desc.args[2] = req_64->sglistinfo_ptr;
+				desc.args[3] = req_64->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_app_64bit_ireq *)req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+			}
+
+		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
+			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
+			struct qseecom_client_send_service_ireq *req;
+
+			req = (struct qseecom_client_send_service_ireq *)
+				req_buf;
+			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
+			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
+			desc.args[0] = req->key_type;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_ERASE_COMMAND: {
+			smc_id = TZ_OS_RPMB_ERASE_ID;
+			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
+			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
+			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_GENERATE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_GEN_KEY_ID;
+			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_DELETE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_DEL_KEY_ID;
+			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_SET_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_UPDATE_KEY_USERINFO: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
+			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_CLOSE_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_REQUEST_CANCELLATION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
+			desc.arginfo =
+				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
+			struct qseecom_continue_blocked_request_ireq *req =
+				(struct qseecom_continue_blocked_request_ireq *)
+				req_buf;
+			smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+			desc.arginfo =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
+			desc.args[0] = req->app_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		default: {
+			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
+						qseos_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (qseos_cmd_id) */
+	break;
+	} /* end of case SCM_SVC_TZSCHEDULER */
+	default: {
+		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
+					svc_id);
+		ret = -EINVAL;
+		break;
+	}
+	} /* end of switch (svc_id) */
+	scm_resp->result = desc.ret[0];
+	scm_resp->resp_type = desc.ret[1];
+	scm_resp->data = desc.ret[2];
+	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
+		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
+	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
+		scm_resp->result, scm_resp->resp_type, scm_resp->data);
+	return ret;
+}
+
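+/*
+ * Single SCM entry point: legacy (non-ARMv8) targets go through scm_call()
+ * with explicit buffer lengths, while ARMv8 targets are translated into
+ * scm_call2() descriptors by qseecom_scm_call2() above.
+ */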
+static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+	if (!is_scm_armv8())
+		return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
+				resp_buf, resp_len);
+	else
+		return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
+}
+
+static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
+		struct qseecom_register_listener_req *svc)
+{
+	struct qseecom_registered_listener_list *ptr;
+	int unique = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
+		if (ptr->svc.listener_id == svc->listener_id) {
+			pr_err("Service id: %u is already registered\n",
+					ptr->svc.listener_id);
+			unique = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+	return unique;
+}
+
+static struct qseecom_registered_listener_list *__qseecom_find_svc(
+						int32_t listener_id)
+{
+	struct qseecom_registered_listener_list *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_listener_list_head, list) {
+		if (entry->svc.listener_id == listener_id)
+			break;
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
+		pr_err("Service id: %u is not found\n", listener_id);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
+				struct qseecom_dev_handle *handle,
+				struct qseecom_register_listener_req *listener)
+{
+	int ret = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_register_listener_64bit_ireq req_64bit;
+	struct qseecom_command_scm_resp resp;
+	ion_phys_addr_t pa;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+
+	/* Get the handle of the shared fd */
+	svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					listener->ifd_data_fd);
+	if (IS_ERR_OR_NULL(svc->ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		return ret;
+	}
+	/* Populate the structure for sending scm call to load image */
+	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
+	if (IS_ERR_OR_NULL(svc->sb_virt)) {
+		pr_err("ION memory mapping for listener shared buffer failed\n");
+		return -ENOMEM;
+	}
+	svc->sb_phys = (phys_addr_t)pa;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req.listener_id = svc->svc.listener_id;
+		req.sb_len = svc->sb_length;
+		req.sb_ptr = (uint32_t)svc->sb_phys;
+		cmd_buf = (void *)&req;
+		cmd_len = sizeof(struct qseecom_register_listener_ireq);
+	} else {
+		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req_64bit.listener_id = svc->svc.listener_id;
+		req_64bit.sb_len = svc->sb_length;
+		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
+		cmd_buf = (void *)&req_64bit;
+		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
+	}
+
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+					 &resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		return -EINVAL;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Error SB registration req: resp.result = %d\n",
+			resp.result);
+		return -EPERM;
+	}
+	return 0;
+}
+
+static int qseecom_register_listener(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct qseecom_register_listener_req rcvd_lstnr;
+	struct qseecom_registered_listener_list *new_entry;
+
+	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
+			rcvd_lstnr.sb_size))
+		return -EFAULT;
+
+	data->listener.id = 0;
+	if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
+		pr_err("Service is not unique and is already registered\n");
+		data->released = true;
+		return -EBUSY;
+	}
+
+	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
+	new_entry->rcv_req_flag = 0;
+
+	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
+	new_entry->sb_length = rcvd_lstnr.sb_size;
+	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
+	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
+		pr_err("qseecom_set_sb_memoryfailed\n");
+		kzfree(new_entry);
+		return -ENOMEM;
+	}
+
+	data->listener.id = rcvd_lstnr.listener_id;
+	init_waitqueue_head(&new_entry->rcv_req_wq);
+	init_waitqueue_head(&new_entry->listener_block_app_wq);
+	new_entry->send_resp_flag = 0;
+	new_entry->listener_in_use = false;
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	return ret;
+}
+
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	unsigned long flags;
+	uint32_t unmap_mem = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct qseecom_command_scm_resp resp;
+	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */
+
+	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
+	req.listener_id = data->listener.id;
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+					sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
+				ret, data->listener.id);
+		return ret;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
+				resp.result, data->listener.id);
+		return -EPERM;
+	}
+
+	data->abort = 1;
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
+			list) {
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			wake_up_all(&ptr_svc->rcv_req_wq);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+				atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc,
+			&qseecom.registered_listener_list_head, list) {
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			if (ptr_svc->sb_virt) {
+				unmap_mem = 1;
+				ihandle = ptr_svc->ihandle;
+			}
+			list_del(&ptr_svc->list);
+			kzfree(ptr_svc);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	/* Unmap the memory */
+	if (unmap_mem) {
+		if (!IS_ERR_OR_NULL(ihandle)) {
+			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+			ion_free(qseecom.ion_clnt, ihandle);
+		}
+	}
+	data->released = true;
+	return ret;
+}
+
+static int __qseecom_set_msm_bus_request(uint32_t mode)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qclk->ce_core_src_clk != NULL) {
+		if (mode == INACTIVE) {
+			__qseecom_disable_clk(CLK_QSEE);
+		} else {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				pr_err("CLK enabling failed (%d) MODE (%d)\n",
+							ret, mode);
+		}
+	}
+
+	if ((!ret) && (qseecom.current_mode != mode)) {
+		ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, mode);
+		if (ret) {
+			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
+							ret, mode);
+			if (qclk->ce_core_src_clk != NULL) {
+				if (mode == INACTIVE) {
+					ret = __qseecom_enable_clk(CLK_QSEE);
+					if (ret)
+						pr_err("CLK enable failed\n");
+				} else
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+		}
+		qseecom.current_mode = mode;
+	}
+	return ret;
+}
+
+static void qseecom_bw_inactive_req_work(struct work_struct *work)
+{
+	mutex_lock(&app_access_lock);
+	mutex_lock(&qsee_bw_mutex);
+	if (qseecom.timer_running)
+		__qseecom_set_msm_bus_request(INACTIVE);
+	pr_debug("current_mode = %d, cumulative_mode = %d\n",
+				qseecom.current_mode, qseecom.cumulative_mode);
+	qseecom.timer_running = false;
+	mutex_unlock(&qsee_bw_mutex);
+	mutex_unlock(&app_access_lock);
+}
+
+static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
+{
+	schedule_work(&qseecom.bw_inactive_req_ws);
+}
+
+static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+	int ret = 0;
+
+	mutex_lock(&clk_access_lock);
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->clk_access_cnt > 2) {
+		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
+		ret = -EINVAL;
+		goto err_dec_ref_cnt;
+	}
+	if (qclk->clk_access_cnt == 2)
+		qclk->clk_access_cnt--;
+
+err_dec_ref_cnt:
+	mutex_unlock(&clk_access_lock);
+	return ret;
+}
+
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
+{
+	int32_t ret = 0;
+	int32_t request_mode = INACTIVE;
+
+	mutex_lock(&qsee_bw_mutex);
+	if (mode == 0) {
+		if (qseecom.cumulative_mode > MEDIUM)
+			request_mode = HIGH;
+		else
+			request_mode = qseecom.cumulative_mode;
+	} else {
+		request_mode = mode;
+	}
+
+	ret = __qseecom_set_msm_bus_request(request_mode);
+	if (ret) {
+		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
+			ret, request_mode);
+		goto err_scale_timer;
+	}
+
+	if (qseecom.timer_running) {
+		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
+		if (ret) {
+			pr_err("Failed to decrease clk ref count.\n");
+			goto err_scale_timer;
+		}
+		del_timer_sync(&(qseecom.bw_scale_down_timer));
+		qseecom.timer_running = false;
+	}
+err_scale_timer:
+	mutex_unlock(&qsee_bw_mutex);
+	return ret;
+}
+
+static int qseecom_unregister_bus_bandwidth_needs(
+					struct qseecom_dev_handle *data)
+{
+	int32_t ret = 0;
+
+	qseecom.cumulative_mode -= data->mode;
+	data->mode = INACTIVE;
+
+	return ret;
+}
+
+static int __qseecom_register_bus_bandwidth_needs(
+			struct qseecom_dev_handle *data, uint32_t request_mode)
+{
+	int32_t ret = 0;
+
+	if (data->mode == INACTIVE) {
+		qseecom.cumulative_mode += request_mode;
+		data->mode = request_mode;
+	} else {
+		if (data->mode != request_mode) {
+			qseecom.cumulative_mode -= data->mode;
+			qseecom.cumulative_mode += request_mode;
+			data->mode = request_mode;
+		}
+	}
+	return ret;
+}
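+
+/*
+ * Example of the accounting above: a client at INACTIVE that requests
+ * MEDIUM adds MEDIUM to cumulative_mode; if it later requests HIGH, its
+ * old MEDIUM share is removed and HIGH added, so cumulative_mode always
+ * tracks the sum of all clients' current modes.
+ */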
+
+static int qseecom_perf_enable(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	ret = qsee_vote_for_clock(data, CLK_DFAB);
+	if (ret) {
+		pr_err("Failed to vote for DFAB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+	ret = qsee_vote_for_clock(data, CLK_SFPB);
+	if (ret) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		pr_err("Failed to vote for SFPB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+
+perf_enable_exit:
+	return ret;
+}
+
+static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	int32_t ret = 0;
+	int32_t req_mode;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	if (req_mode > HIGH) {
+		pr_err("Invalid bandwidth mode (%d)\n", req_mode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Register bus bandwidth needs if bus scaling feature is enabled;
+	 * otherwise, qseecom enable/disable clocks for the client directly.
+	 */
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		pr_debug("Bus scaling feature is NOT enabled\n");
+		pr_debug("request bandwidth mode %d for the client\n",
+				req_mode);
+		if (req_mode != INACTIVE) {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		} else {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+	}
+	return ret;
+}
+
+static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
+{
+	if (qseecom.no_clock_support)
+		return;
+
+	mutex_lock(&qsee_bw_mutex);
+	qseecom.bw_scale_down_timer.expires = jiffies +
+		msecs_to_jiffies(duration);
+	mod_timer(&(qseecom.bw_scale_down_timer),
+		qseecom.bw_scale_down_timer.expires);
+	qseecom.timer_running = true;
+	mutex_unlock(&qsee_bw_mutex);
+}
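+
+/*
+ * For example, __qseecom_add_bw_scale_down_timer(
+ * QSEECOM_SEND_CMD_CRYPTO_TIMEOUT) re-arms the timer to fire ~2 seconds
+ * later; its callback schedules bw_inactive_req_ws, whose worker drops the
+ * bus vote back to INACTIVE if the timer is still marked running.
+ */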
+
+static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
+{
+	if (!qseecom.support_bus_scaling)
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	else
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+}
+
+static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
+		if (ret)
+			pr_err("Failed to set bw MEDIUM.\n");
+	} else {
+		ret = qsee_vote_for_clock(data, CLK_SFPB);
+		if (ret)
+			pr_err("Fail vote for clk SFPB ret %d\n", ret);
+	}
+	return ret;
+}
+
+static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	ion_phys_addr_t pa;
+	int32_t ret;
+	struct qseecom_set_sb_mem_param_req req;
+	size_t len;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
+		return -EFAULT;
+
+	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
+					(req.sb_len == 0)) {
+		pr_err("Inavlid input(s)ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
+			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
+			req.sb_len))
+		return -EFAULT;
+
+	/* Get the handle of the shared fd */
+	data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+						req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(data->client.ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	if (len < req.sb_len) {
+		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
+			req.sb_len, len);
+		return -EINVAL;
+	}
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							data->client.ihandle);
+	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+		pr_err("ION memory mapping for client shared buf failed\n");
+		return -ENOMEM;
+	}
+	data->client.sb_phys = (phys_addr_t)pa;
+	data->client.sb_length = req.sb_len;
+	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
+	return 0;
+}
+
+static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
+{
+	int ret;
+
+	ret = (qseecom.send_resp_flag != 0);
+	return ret || data->abort;
+}
+
+static int __qseecom_reentrancy_listener_has_sent_rsp(
+			struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret;
+
+	ret = (ptr_svc->send_resp_flag != 0);
+	return ret || data->abort;
+}
+
+static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp,
+			struct qseecom_client_listener_data_irsp *send_data_rsp,
+			struct qseecom_registered_listener_list *ptr_svc,
+							uint32_t lstnr)
+{
+	int ret = 0;
+
+	send_data_rsp->status = QSEOS_RESULT_FAILURE;
+	qseecom.send_resp_flag = 0;
+	send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
+	send_data_rsp->listener_id = lstnr;
+	if (ptr_svc)
+		pr_warn("listener_id:%x, lstnr: %x\n",
+					ptr_svc->svc.listener_id, lstnr);
+	if (ptr_svc && ptr_svc->ihandle) {
+		ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (lstnr == RPMB_SERVICE) {
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			return ret;
+	}
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
+				sizeof(*send_data_rsp), resp, sizeof(*resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+						ret, data->client.app_id);
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+		return ret;
+	}
+	if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+		pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+		ret = -EINVAL;
+	}
+	if (lstnr == RPMB_SERVICE)
+		__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static void __qseecom_clean_listener_sglistinfo(
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	if (ptr_svc->sglist_cnt) {
+		memset(ptr_svc->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		ptr_svc->sglist_cnt = 0;
+	}
+}
+
+static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	unsigned long flags;
+	struct qseecom_client_listener_data_irsp send_data_rsp;
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -EINVAL;
+		}
+
+		if (!ptr_svc->ihandle) {
+			pr_err("Client handle is not initialized\n");
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -EINVAL;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_warn("Service requested does not exist\n");
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -ERESTARTSYS;
+		}
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals */
+		sigfillset(&new_sigset);
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		do {
+			/*
+			 * When reentrancy is not supported, check global
+			 * send_resp_flag; otherwise, check this listener's
+			 * send_resp_flag.
+			 */
+			if (!qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_listener_has_sent_rsp(data))) {
+				break;
+			}
+
+			if (qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+		} else {
+			status = QSEOS_RESULT_SUCCESS;
+		}
+
+		qseecom.send_resp_flag = 0;
+		ptr_svc->send_resp_flag = 0;
+		table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			send_data_rsp.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+			send_data_rsp.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			send_data_rsp_64bit.sglistinfo_ptr =
+				virt_to_phys(table);
+			send_data_rsp_64bit.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (qseecom.whitelist_support == false)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+		if (ptr_svc) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				return ret;
+			}
+		}
+
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				return ret;
+		}
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+		ptr_svc->listener_in_use = false;
+		__qseecom_clean_listener_sglistinfo(ptr_svc);
+		if (ret) {
+			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+				ret, data->client.app_id);
+			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+				__qseecom_disable_clk(CLK_QSEE);
+			return ret;
+		}
+		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+		}
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
+int __qseecom_process_reentrancy_blocked_on_listener(
+				struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *list_ptr;
+	int ret = 0;
+	struct qseecom_continue_blocked_request_ireq ireq;
+	struct qseecom_command_scm_resp continue_resp;
+	sigset_t new_sigset, old_sigset;
+	unsigned long flags;
+	bool found_app = false;
+
+	if (!resp || !data) {
+		pr_err("invalid resp or data pointer\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* find app_id & img_name from list */
+	if (!ptr_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+			if ((ptr_app->app_id == data->client.app_id) &&
+				(!strcmp(ptr_app->app_name,
+						data->client.app_name))) {
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+					flags);
+		if (!found_app) {
+			pr_err("app_id %d (%s) is not found\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -ENOENT;
+			goto exit;
+		}
+	}
+
+	list_ptr = __qseecom_find_svc(resp->data);
+	if (!list_ptr) {
+		pr_err("Invalid listener ID\n");
+		ret = -ENODATA;
+		goto exit;
+	}
+	pr_debug("lsntr %d in_use = %d\n",
+			resp->data, list_ptr->listener_in_use);
+	ptr_app->blocked_on_listener_id = resp->data;
+	/* sleep until listener is available */
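+	/*
+	 * While waiting, drop app_access_lock so the listener thread can
+	 * make progress, and keep app_block_ref_cnt elevated so OS-level
+	 * scm calls are held off until every blocked app is released.
+	 */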
+	do {
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		sigfillset(&new_sigset);
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+		mutex_unlock(&app_access_lock);
+		do {
+			if (!wait_event_freezable(
+				list_ptr->listener_block_app_wq,
+				!list_ptr->listener_in_use)) {
+				break;
+			}
+		} while (1);
+		mutex_lock(&app_access_lock);
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+	} while (list_ptr->listener_in_use == true);
+	ptr_app->blocked_on_listener_id = 0;
+	/* notify the blocked app that listener is available */
+	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
+		resp->data, data->client.app_id,
+		data->client.app_name);
+	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+	ireq.app_id = data->client.app_id;
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+			&ireq, sizeof(ireq),
+			&continue_resp, sizeof(continue_resp));
+	if (ret) {
+		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
+			data->client.app_id,
+			data->client.app_name, ret);
+		goto exit;
+	}
+	/*
+	 * After TZ app is unblocked, then continue to next case
+	 * for incomplete request processing
+	 */
+	resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+	return ret;
+}
+
+static int __qseecom_reentrancy_process_incomplete_cmd(
+					struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	unsigned long flags;
+	struct qseecom_client_listener_data_irsp send_data_rsp;
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocking listener service with the lstnr id
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			return -EINVAL;
+		}
+
+		if (!ptr_svc->ihandle) {
+			pr_err("Client handle is not initialized\n");
+			return -EINVAL;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_warn("Service requested does not exist\n");
+			return -ERESTARTSYS;
+		}
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals */
+		sigfillset(&new_sigset);
+
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		/* unlock mutex btw waking listener and sleep-wait */
+		mutex_unlock(&app_access_lock);
+		do {
+			if (!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+		/* lock mutex again after resp sent */
+		mutex_lock(&app_access_lock);
+		ptr_svc->send_resp_flag = 0;
+		qseecom.send_resp_flag = 0;
+
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d\n",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status  = QSEOS_RESULT_FAILURE;
+		} else {
+			status  = QSEOS_RESULT_SUCCESS;
+		}
+		table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			send_data_rsp.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			send_data_rsp_64bit.sglistinfo_ptr =
+				virt_to_phys(table);
+			send_data_rsp_64bit.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (qseecom.whitelist_support == false)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+		if (ptr_svc) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				return ret;
+			}
+		}
+		if (lstnr == RPMB_SERVICE) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				return ret;
+		}
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+		ptr_svc->listener_in_use = false;
+		__qseecom_clean_listener_sglistinfo(ptr_svc);
+		wake_up_interruptible(&ptr_svc->listener_block_app_wq);
+
+		if (ret) {
+			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+				ret, data->client.app_id);
+			goto exit;
+		}
+
+		switch (resp->result) {
+		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
+					lstnr, data->client.app_id, resp->data);
+			if (lstnr == resp->data) {
+				pr_err("lstnr %d should not be blocked!\n",
+					lstnr);
+				ret = -EINVAL;
+				goto exit;
+			}
+			ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, NULL, data);
+			if (ret) {
+				pr_err("failed to process App(%d) %s blocked on listener %d\n",
+					data->client.app_id,
+					data->client.app_name, resp->data);
+				goto exit;
+			}
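+			/*
+			 * Fall through: after the app is unblocked,
+			 * resp->result is QSEOS_RESULT_INCOMPLETE again and
+			 * the outer loop drives the next listener exchange.
+			 */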
+		case QSEOS_RESULT_SUCCESS:
+		case QSEOS_RESULT_INCOMPLETE:
+			break;
+		default:
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+			goto exit;
+		}
+exit:
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
+/*
+ * QSEE does not support OS-level command reentrancy until RE phase-3,
+ * and OS-level scm_call commands fail while any TZ app is blocked.
+ * So, before sending an OS-level scm call, check whether any app is
+ * blocked and, if so, wait until all apps are unblocked.
+ */
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
+{
+	sigset_t new_sigset, old_sigset;
+
+	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
+		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
+		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
+		/* thread sleep until this app unblocked */
+		while (qseecom.app_block_ref_cnt > 0) {
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(qseecom.app_block_ref_cnt == 0)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+	}
+}
+
+/*
+ * An scm_call to send data will fail if this TA is blocked or if more
+ * than one TA is requesting listener services; so, first check whether
+ * this thread needs to wait.
+ */
+static void __qseecom_reentrancy_check_if_this_app_blocked(
+			struct qseecom_registered_app_list *ptr_app)
+{
+	sigset_t new_sigset, old_sigset;
+
+	if (qseecom.qsee_reentrancy_support) {
+		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
+			/* thread sleep until this app unblocked */
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(!ptr_app->app_blocked &&
+					qseecom.app_block_ref_cnt <= 1)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+	}
+}
+
+static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
+					uint32_t *app_id)
+{
+	int32_t ret;
+	struct qseecom_command_scm_resp resp;
+	bool found_app = false;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+
+	if (!app_id) {
+		pr_err("Null pointer to app_id\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+
+	/* check if app exists and has been registered locally */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_app_list_head, list) {
+		if (!strcmp(entry->app_name, req.app_name)) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (found_app) {
+		pr_debug("Found app with id %d\n", entry->app_id);
+		*app_id = entry->app_id;
+		return 0;
+	}
+
+	memset((void *)&resp, 0, sizeof(resp));
+
+	/*  SCM_CALL  to check if app_id for the mentioned app exists */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_check_app_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to check if app is already loaded failed\n");
+		return -EINVAL;
+	}
+
+	if (resp.result == QSEOS_RESULT_FAILURE)
+		return 0;
+
+	switch (resp.resp_type) {
+	/*qsee returned listener type response */
+	case QSEOS_LISTENER_ID:
+		pr_err("resp type is of listener type instead of app\n");
+		return -EINVAL;
+	case QSEOS_APP_ID:
+		*app_id = resp.data;
+		return 0;
+	default:
+		pr_err("invalid resp type (%d) from qsee\n",
+				resp.resp_type);
+		return -ENODEV;
+	}
+}
+
+static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
+{
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	u32 app_id = 0;
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int32_t ret = 0;
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	bool first_time = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded &&
+				load_img_req.app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded &&
+				load_img_req.app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret)
+			return ret;
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret)
+		goto enable_clk_err;
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret < 0)
+		goto loadapp_err;
+
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list) {
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		ret = 0;
+	} else {
+		first_time = true;
+		pr_warn("App (%s) doesn't exist, loading app for the first time\n",
+			(char *)(load_img_req.img_name));
+		/* Get the handle of the shared fd */
+		ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					load_img_req.ifd_data_fd);
+		if (IS_ERR_OR_NULL(ihandle)) {
+			pr_err("Ion client could not retrieve the handle\n");
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+
+		/* Get the physical address of the ION BUF */
+		ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+		if (ret) {
+			pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+				ret);
+			goto loadapp_err;
+		}
+		if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+			pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+					len, load_img_req.mdt_len,
+					load_img_req.img_len);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+		/* Populate the structure for sending scm call to load image */
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req.mdt_len = load_img_req.mdt_len;
+			load_req.img_len = load_img_req.img_len;
+			strlcpy(load_req.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req.phy_addr = (uint32_t)pa;
+			cmd_buf = (void *)&load_req;
+			cmd_len = sizeof(struct qseecom_load_app_ireq);
+		} else {
+			load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req_64bit.mdt_len = load_img_req.mdt_len;
+			load_req_64bit.img_len = load_img_req.img_len;
+			strlcpy(load_req_64bit.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req_64bit.phy_addr = (uint64_t)pa;
+			cmd_buf = (void *)&load_req_64bit;
+			cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+		}
+
+		ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto loadapp_err;
+		}
+
+		/*  SCM_CALL  to load the app and get the app_id back */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
+			cmd_len, &resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to load app failed\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+				if (!IS_ERR_OR_NULL(ihandle))
+					ion_free(qseecom.ion_clnt, ihandle);
+				ret = -EFAULT;
+				goto loadapp_err;
+			}
+		}
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call failed resp.result unknown, %d\n",
+				resp.result);
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		app_id = resp.data;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		entry->app_arch = load_img_req.app_arch;
+		/*
+		 * keymaster app may be first loaded as "keymaste" by qseecomd,
+		 * and then used as "keymaster" on some targets. To avoid app
+		 * name checking error, register "keymaster" into app_list and
+		 * thread private data.
+		 */
+		if (!strcmp(load_img_req.img_name, "keymaste"))
+			strlcpy(entry->app_name, "keymaster",
+					MAX_APP_NAME_SIZE);
+		else
+			strlcpy(entry->app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+
+		pr_warn("App with id %u (%s) now loaded\n", app_id,
+			(char *)(load_img_req.img_name));
+	}
+	data->client.app_id = app_id;
+	data->client.app_arch = load_img_req.app_arch;
+	if (!strcmp(load_img_req.img_name, "keymaste"))
+		strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
+	else
+		strlcpy(data->client.app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+	load_img_req.app_id = app_id;
+	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+		if (first_time == true) {
+			spin_lock_irqsave(
+				&qseecom.registered_app_list_lock, flags);
+			list_del(&entry->list);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+			kzfree(entry);
+		}
+	}
+
+loadapp_err:
+	__qseecom_disable_clk_scale_down(data);
+enable_clk_err:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+	return ret;
+}
+
+static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
+{
+	int ret = 1;	/* default: unload the app */
+
+	wake_up_all(&qseecom.send_resp_wq);
+	if (qseecom.qsee_reentrancy_support)
+		mutex_unlock(&app_access_lock);
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+					atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	if (qseecom.qsee_reentrancy_support)
+		mutex_lock(&app_access_lock);
+	return ret;
+}
+
+static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	if (!IS_ERR_OR_NULL(data->client.ihandle)) {
+		ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
+		ion_free(qseecom.ion_clnt, data->client.ihandle);
+		data->client.ihandle = NULL;
+	}
+	return ret;
+}
+
+static int qseecom_unload_app(struct qseecom_dev_handle *data,
+				bool app_crash)
+{
+	unsigned long flags;
+	unsigned long flags1;
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_registered_app_list *ptr_app = NULL;
+	bool unload = false;
+	bool found_app = false;
+	bool found_dead_app = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+		pr_debug("Do not unload keymaster app from tz\n");
+		goto unload_exit;
+	}
+
+	__qseecom_cleanup_app(data);
+	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
+
+	if (data->client.app_id > 0) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+									list) {
+			if (ptr_app->app_id == data->client.app_id) {
+				if (!strcmp((void *)ptr_app->app_name,
+					(void *)data->client.app_name)) {
+					found_app = true;
+					if (app_crash || ptr_app->ref_cnt == 1)
+						unload = true;
+					break;
+				}
+				found_dead_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags);
+		if (found_app == false && found_dead_app == false) {
+			pr_err("Cannot find app with id = %d (%s)\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -EINVAL;
+			goto unload_exit;
+		}
+	}
+
+	if (found_dead_app)
+		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
+			(char *)data->client.app_name);
+
+	if (unload) {
+		struct qseecom_unload_app_ireq req;
+		/* Populate the structure for the scm call to unload the app */
+		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+		req.app_id = data->client.app_id;
+
+		/* SCM_CALL to unload the app */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_unload_app_ireq),
+				&resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to unload app (id = %d) failed\n",
+								req.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		} else {
+			pr_warn("App id %d now unloaded\n", req.app_id);
+		}
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("app (%d) unload_failed!!\n",
+					data->client.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		}
+		if (resp.result == QSEOS_RESULT_SUCCESS)
+			pr_debug("App (%d) is unloaded!!\n",
+					data->client.app_id);
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd fail err: %d\n",
+									ret);
+				goto unload_exit;
+			}
+		}
+	}
+
+	if (found_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
+		if (app_crash) {
+			ptr_app->ref_cnt = 0;
+			pr_debug("app_crash: ref_count = 0\n");
+		} else {
+			if (ptr_app->ref_cnt == 1) {
+				ptr_app->ref_cnt = 0;
+				pr_debug("ref_count set to 0\n");
+			} else {
+				ptr_app->ref_cnt--;
+				pr_debug("Can't unload app(%d) inuse\n",
+					ptr_app->app_id);
+			}
+		}
+		if (unload) {
+			list_del(&ptr_app->list);
+			kzfree(ptr_app);
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags1);
+	}
+unload_exit:
+	qseecom_unmap_ion_allocated_memory(data);
+	data->released = true;
+	return ret;
+}
+
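+/*
+ * Translate a client user-space virtual address that lies inside the
+ * registered shared buffer into the corresponding physical address.
+ */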
+static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
+}
+
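+/*
+ * Translate a client user-space virtual address that lies inside the
+ * registered shared buffer into the kernel mapping of the same offset.
+ */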
+static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return (uintptr_t)data->client.sb_virt +
+				(virt - data->client.user_virt_sb_base);
+}
+
+int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	void *req_buf = NULL;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	/* Clients need to ensure req_buf is at base offset of shared buffer */
+	if ((uintptr_t)req_ptr->cmd_req_buf !=
+			data_ptr->client.user_virt_sb_base) {
+		pr_err("cmd buf not pointing to base offset of shared buffer\n");
+		return -EINVAL;
+	}
+
+	if (data_ptr->client.sb_length <
+			sizeof(struct qseecom_rpmb_provision_key)) {
+		pr_err("shared buffer is too small to hold key type\n");
+		return -EINVAL;
+	}
+	req_buf = data_ptr->client.sb_virt;
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->key_type =
+		((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	return ret;
+}
+
+int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	uint32_t reqd_len_sb_in = 0;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
+	if (reqd_len_sb_in > data_ptr->client.sb_length) {
+		pr_err("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				reqd_len_sb_in, data_ptr->client.sb_length);
+		return -ENOMEM;
+	}
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
+
+
+	return ret;
+}
+
+static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_svc_cmd_req *req)
+{
+	if (!req || !req->resp_buf || !req->cmd_req_buf) {
+		pr_err("req or cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_virt == NULL) {
+		pr_err("sb_virt null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base == 0) {
+		pr_err("user_virt_sb_base is null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_length == 0) {
+		pr_err("sb_length is 0\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_client_send_service_ireq send_svc_ireq;
+	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_send_svc_cmd_req req;
+	void   *send_req_ptr;
+	size_t req_buf_size;
+
+	if (copy_from_user(&req,
+				(void __user *)argp,
+				sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_service_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	data->type = QSEECOM_SECURE_SERVICE;
+
+	switch (req.cmd_id) {
+	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
+	case QSEOS_RPMB_ERASE_COMMAND:
+	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
+		send_req_ptr = &send_svc_ireq;
+		req_buf_size = sizeof(send_svc_ireq);
+		if (__qseecom_process_rpmb_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	case QSEOS_FSM_LTEOTA_REQ_CMD:
+	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
+	case QSEOS_FSM_IKE_REQ_CMD:
+	case QSEOS_FSM_IKE_REQ_RSP_CMD:
+	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
+	case QSEOS_FSM_OEM_FUSE_READ_ROW:
+	case QSEOS_FSM_ENCFS_REQ_CMD:
+	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
+		send_req_ptr = &send_fsm_key_svc_ireq;
+		req_buf_size = sizeof(send_fsm_key_svc_ireq);
+		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
+		return -EINVAL;
+	}
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
+		if (ret) {
+			pr_err("Fail to set bw HIGH\n");
+			return ret;
+		}
+	} else {
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clocks with err %d\n", ret);
+			goto exit;
+		}
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				(const void *)send_req_ptr,
+				req_buf_size, &resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		}
+		goto exit;
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_debug("qseos_result_incomplete\n");
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd fail with result: %d\n",
+				resp.result);
+		}
+		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
+			pr_warn("RPMB key status is 0x%x\n", resp.result);
+			/* resp_buf is a user pointer; don't deref in kernel */
+			if (copy_to_user((void __user *)req.resp_buf,
+					&resp.result, sizeof(resp.result)))
+				ret = -EFAULT;
+			else
+				ret = 0;
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with resp.result: %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	default:
+		pr_err("Response result %d not supported\n",
+				resp.result);
+		ret = -EINVAL;
+		break;
+	}
+	if (!qseecom.support_bus_scaling) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	} else {
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+	}
+
+exit:
+	return ret;
+}
+
+static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
+						(req->cmd_req_buf == NULL)) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared buffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf and resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	switch (resp->result) {
+	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+		pr_warn("App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name,
+			resp->data);
+		ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, ptr_app, data);
+		if (ret) {
+			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name, resp->data);
+			return ret;
+		}
+
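+	/*
+	 * Fall through: the blocked-on-listener path reset resp->result to
+	 * QSEOS_RESULT_INCOMPLETE once the listener became available.
+	 */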
+	case QSEOS_RESULT_INCOMPLETE:
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+		wake_up_interruptible(&qseecom.app_block_wq);
+		if (ret)
+			pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+		return ret;
+	case QSEOS_RESULT_SUCCESS:
+		return ret;
+	default:
+		pr_err("Response result %d not supported\n",
+						resp->result);
+		return -EINVAL;
+	}
+}
+
+static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	int ret = 0;
+	u32 reqd_len_sb_in = 0;
+	struct qseecom_client_send_data_ireq send_data_req = {0};
+	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
+	struct qseecom_command_scm_resp resp;
+	unsigned long flags;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+
+	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		send_data_req.app_id = data->client.app_id;
+		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->cmd_req_buf));
+		send_data_req.req_len = req->cmd_req_len;
+		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->resp_buf));
+		send_data_req.rsp_len = req->resp_len;
+		send_data_req.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req;
+		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
+	} else {
+		send_data_req_64bit.app_id = data->client.app_id;
+		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->cmd_req_buf);
+		send_data_req_64bit.req_len = req->cmd_req_len;
+		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->resp_buf);
+		send_data_req_64bit.rsp_len = req->resp_len;
+		/* check if 32bit app's phys_addr region is under 4GB.*/
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((send_data_req_64bit.req_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
+			(send_data_req_64bit.rsp_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
+			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
+				data->client.app_name,
+				send_data_req_64bit.req_ptr,
+				send_data_req_64bit.req_len,
+				send_data_req_64bit.rsp_ptr,
+				send_data_req_64bit.rsp_len);
+			return -EFAULT;
+		}
+		send_data_req_64bit.sglistinfo_ptr =
+				(uint64_t)virt_to_phys(table);
+		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req_64bit;
+		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
+	}
+
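+	/* Pick the legacy or the whitelist-aware send-data command id */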
+	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret)
+		pr_err("cache operation failed %d\n", ret);
+	return ret;
+}
+
+static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req;
+
+	/* copy_from_user() returns bytes not copied, not an errno */
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	ret = __qseecom_send_cmd(data, &req);
+
+	return ret;
+}
+
+int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
+			struct qseecom_send_modfd_listener_resp *lstnr_resp,
+			struct qseecom_dev_handle *data, int i)
+{
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+		if ((req->cmd_req_len < sizeof(uint32_t)) ||
+			(req->ifd_data[i].cmd_buf_offset >
+			req->cmd_req_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (req len) 0x%x\n",
+				req->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+		if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
+			(lstnr_resp->ifd_data[i].cmd_buf_offset >
+			lstnr_resp->resp_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (lstnr resp len) 0x%x\n",
+				lstnr_resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
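+/*
+ * For each ion fd attached to a modfd command (or listener response),
+ * look up its sg table and patch the physical address(es) into the
+ * request buffer at cmd_buf_offset; on cleanup, zero those fields and
+ * invalidate the caches instead.
+ */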
+static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+						lstnr_resp->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_err("Num of scattered entries (%d) is greater than max supported %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint32_t *update;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				/*
+				 * Check if sg list phy add region is under 4GB
+				 */
+				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
+					(!cleanup) &&
+					((uint64_t)sg_dma_address(sg_ptr->sgl)
+					>= PHY_ADDR_4G - sg->length)) {
+					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+						data->client.app_name,
+						&(sg_dma_address(sg_ptr->sgl)),
+						sg->length);
+					goto err;
+				}
+				update = (uint32_t *) field;
+				*update = cleanup ? 0 :
+					(uint32_t)sg_dma_address(sg_ptr->sgl);
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry *update;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+						(req->cmd_req_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+						SG_ENTRY_SZ * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					goto err;
+				}
+			}
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				update = (struct qseecom_sg_entry *)field;
+				for (j = 0; j < sg_ptr->nents; j++) {
+					/*
+					 * Check if sg list PA is under 4GB
+					 */
+					if ((qseecom.qsee_version >=
+						QSEE_VERSION_40) &&
+						(!cleanup) &&
+						((uint64_t)(sg_dma_address(sg))
+						>= PHY_ADDR_4G - sg->length)) {
+						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+							data->client.app_name,
+							&(sg_dma_address(sg)),
+							sg->length);
+						goto err;
+					}
+					update->phys_addr = cleanup ? 0 :
+						(uint32_t)sg_dma_address(sg);
+					update->len = cleanup ? 0 : sg->length;
+					update++;
+					len += sg->length;
+					sg = sg_next(sg);
+				}
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+		}
+
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
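+/*
+ * When an ion buffer carries more scatter-gather entries than fit in
+ * the request (> QSEECOM_MAX_SG_ENTRY), copy all entries into a newly
+ * allocated contiguous DMA buffer and leave a v2 list-buffer header in
+ * the request that points TZ at that buffer.
+ */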
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry_64bit *sg_entry;
+	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+	/* Allocate a contiguous kernel buffer */
+	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.pdev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	/* update qseecom_sg_list_buf_hdr_64bit */
+	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+	buf_hdr->new_buf_phys_addr = coh_pmem;
+	buf_hdr->nents_total = sg_ptr->nents;
+	/* save the left sg entries into new allocated buf */
+	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+
+	return 0;
+}
+
+static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+						lstnr_resp->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("IOn client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_warn("Num of scattered entries");
+			pr_warn(" (%d) is greater than %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			if (cleanup) {
+				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+					data->client.sec_buf_fd[i].vbase)
+					dma_free_coherent(qseecom.pdev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			} else {
+				ret = __qseecom_allocate_sg_list_buffer(data,
+						field, i, sg_ptr);
+				if (ret) {
+					pr_err("Failed to allocate sg list buffer\n");
+					goto err;
+				}
+			}
+			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+			sg = sg_ptr->sgl;
+			goto cleanup;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint64_t *update_64bit;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			/* 64bit app uses 64bit address */
+			update_64bit = (uint64_t *) field;
+			*update_64bit = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg_ptr->sgl);
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry_64bit *update_64bit;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+					(req->cmd_req_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					lstnr_resp->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+			}
+			/* 64bit app uses 64bit address */
+			update_64bit = (struct qseecom_sg_entry_64bit *)field;
+			for (j = 0; j < sg_ptr->nents; j++) {
+				update_64bit->phys_addr = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg);
+				update_64bit->len = cleanup ? 0 :
+						(uint32_t)sg->length;
+				update_64bit++;
+				len += sg->length;
+				sg = sg_next(sg);
+			}
+		}
+cleanup:
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	for (i = 0; i < MAX_ION_FD; i++)
+		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			data->client.sec_buf_fd[i].vbase)
+			dma_free_coherent(qseecom.pdev,
+				data->client.sec_buf_fd[i].size,
+				data->client.sec_buf_fd[i].vbase,
+				data->client.sec_buf_fd[i].pbase);
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
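+/*
+ * Common implementation behind the 32-bit and 64-bit modfd-command ioctls:
+ * validate the request, translate the user-space buffer pointers to their
+ * kernel mappings, patch the ion-fd fields in, send the command, then undo
+ * the patching once the scm call completes.
+ */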
+static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp,
+					bool is_64bit_addr)
+{
+	int ret = 0;
+	int i;
+	struct qseecom_send_modfd_cmd_req req;
+	struct qseecom_send_cmd_req send_cmd_req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	send_cmd_req.cmd_req_buf = req.cmd_req_buf;
+	send_cmd_req.cmd_req_len = req.cmd_req_len;
+	send_cmd_req.resp_buf = req.resp_buf;
+	send_cmd_req.resp_len = req.resp_len;
+
+	if (__validate_send_cmd_inputs(data, &send_cmd_req))
+		return -EINVAL;
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, req.ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.cmd_req_buf);
+	req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_buf);
+
+	if (!is_64bit_addr) {
+		ret = __qseecom_update_cmd_buf(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf(&req, true, data);
+		if (ret)
+			return ret;
+	} else {
+		ret = __qseecom_update_cmd_buf_64(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf_64(&req, true, data);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, false);
+}
+
+static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, true);
+}
+
+static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
+		struct qseecom_registered_listener_list *svc)
+{
+	int ret;
+
+	ret = (svc->rcv_req_flag != 0);
+	return ret || data->abort;
+}
+
+static int qseecom_receive_req(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_registered_listener_list *this_lstnr;
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (!this_lstnr) {
+		pr_err("Invalid listener ID\n");
+		return -ENODATA;
+	}
+
+	while (1) {
+		if (wait_event_freezable(this_lstnr->rcv_req_wq,
+				__qseecom_listener_has_rcvd_req(data,
+				this_lstnr))) {
+			pr_debug("Interrupted: exiting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			return -ENODEV;
+		}
+		this_lstnr->rcv_req_flag = 0;
+		break;
+	}
+	return ret;
+}
+
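+/*
+ * Sanity-checks the .mdt header: ELF magic, a non-zero program header
+ * count, and program headers that fit within the firmware image, for both
+ * ELF32 and ELF64 images.
+ */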
+static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
+{
+	unsigned char app_arch = 0;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+
+	if (fw_entry->size < EI_NIDENT) {
+		pr_err("%s: image too small for an ELF ident\n",
+				 qseecom.pdev->init_name);
+		return false;
+	}
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+
+	switch (app_arch) {
+	case ELFCLASS32: {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr)) {
+			pr_err("%s: Not big enough to be an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+		    sizeof(struct elf32_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	case ELFCLASS64: {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr64)) {
+			pr_err("%s: Not big enough to be an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr64->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
+		    sizeof(struct elf64_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	default: {
+		pr_err("QSEE app arch %u is not supported\n", app_arch);
+		return false;
+	}
+	}
+	return true;
+}
+
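+/*
+ * Computes the total size of a split firmware image (<app>.mdt plus every
+ * <app>.b%02d blob) and reports the ELF class so the caller can size the
+ * load buffer and pick the matching common library.
+ */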
+static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
+					uint32_t *app_arch)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+	if (rc) {
+		pr_err("error with request_firmware\n");
+		ret = -EIO;
+		goto err;
+	}
+	if (!__qseecom_is_fw_image_valid(fw_entry)) {
+		ret = -EIO;
+		goto err;
+	}
+	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	*fw_size = fw_entry->size;
+	if (*app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (*app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, *app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		memset(fw_name, 0, sizeof(fw_name));
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+		if (ret)
+			goto err;
+		if (*fw_size > U32_MAX - fw_entry->size) {
+			pr_err("QSEE %s app file size overflow\n", appname);
+			ret = -EINVAL;
+			goto err;
+		}
+		*fw_size += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+
+	return ret;
+err:
+	if (fw_entry)
+		release_firmware(fw_entry);
+	*fw_size = 0;
+	return ret;
+}
+
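+/*
+ * Concatenates the .mdt and all .b%02d blobs into the caller-supplied
+ * buffer, filling in mdt_len/img_len of the load request and rejecting
+ * images that would overflow the buffer sized by __qseecom_get_fw_size().
+ */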
+static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
+				uint32_t fw_size,
+				struct qseecom_load_app_ireq *load_req)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	u8 *img_data_ptr = img_data;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+	unsigned char app_arch = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+	if (rc) {
+		ret = -EIO;
+		goto err;
+	}
+
+	load_req->img_len = fw_entry->size;
+	if (load_req->img_len > fw_size) {
+		pr_err("app %s size %zu is larger than buf size %u\n",
+			appname, fw_entry->size, fw_size);
+		ret = -EINVAL;
+		goto err;
+	}
+	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+	img_data_ptr = img_data_ptr + fw_entry->size;
+	load_req->mdt_len = fw_entry->size; /* mdt length */
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	if (app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+		if (ret) {
+			pr_err("Failed to locate blob %s\n", fw_name);
+			goto err;
+		}
+		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
+			(fw_entry->size + load_req->img_len > fw_size)) {
+			pr_err("Invalid file size for %s\n", fw_name);
+			ret = -EINVAL;
+			goto err;
+		}
+		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+		img_data_ptr = img_data_ptr + fw_entry->size;
+		load_req->img_len += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+	return ret;
+err:
+	release_firmware(fw_entry);
+	return ret;
+}
+
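+/*
+ * Allocates an ion buffer from the qseecom heap for the firmware image and
+ * returns its kernel mapping together with the physical address passed to
+ * TZ in the load command.
+ */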
+static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
+			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
+{
+	size_t len = 0;
+	int ret = 0;
+	ion_phys_addr_t pa;
+	struct ion_handle *ihandle = NULL;
+	u8 *img_data = NULL;
+
+	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
+			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+
+	if (IS_ERR_OR_NULL(ihandle)) {
+		pr_err("ION alloc failed\n");
+		return -ENOMEM;
+	}
+	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
+					ihandle);
+
+	if (IS_ERR_OR_NULL(img_data)) {
+		pr_err("ION memory mapping for image loading failed\n");
+		ret = -ENOMEM;
+		goto exit_ion_free;
+	}
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+	if (ret) {
+		pr_err("physical memory retrieval failure\n");
+		ret = -EIO;
+		goto exit_ion_unmap_kernel;
+	}
+
+	*pihandle = ihandle;
+	*data = img_data;
+	*paddr = pa;
+	return ret;
+
+exit_ion_unmap_kernel:
+	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+exit_ion_free:
+	ion_free(qseecom.ion_clnt, ihandle);
+	ihandle = NULL;
+	return ret;
+}
+
+static void __qseecom_free_img_data(struct ion_handle **ihandle)
+{
+	ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
+	ion_free(qseecom.ion_clnt, *ihandle);
+	*ihandle = NULL;
+}
+
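+/*
+ * Loads a trusted app from the firmware partition: on QSEE versions that
+ * need it, ensures the matching cmnlib/cmnlib64 is resident, stages the
+ * image in ion memory, votes for bus bandwidth and clocks, and issues the
+ * APP_START scm call, returning the app id assigned by QSEE.
+ */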
+static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
+				uint32_t *app_id)
+{
+	int ret = -1;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	ion_phys_addr_t pa = 0;
+	struct ion_handle *ihandle = NULL;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!data || !appname || !app_id) {
+		pr_err("Null pointer to data or appname or appid\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+	if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
+		return -EIO;
+	data->client.app_arch = app_arch;
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
+	if (ret)
+		return ret;
+
+	ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+
+	/* Populate the load_req parameters */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		/* mdt_len and img_len already set by __qseecom_get_fw_data() */
+		strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		load_req_64bit.img_len = load_req.img_len;
+		strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
+				img_data, fw_size,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clk_vote;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED\n");
+		else
+			*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response QSEOS_RESULT FAILURE\n");
+		break;
+	default:
+		pr_err("scm call return unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	__qseecom_free_img_data(&ihandle);
+	return ret;
+}
+
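+/*
+ * Loads the QSEE common library (cmnlib or cmnlib64) that 32/64-bit trusted
+ * apps link against; uses the same staging, clock and bus-bandwidth voting
+ * sequence as a regular app load but with the LOAD_SERV_IMAGE command.
+ */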
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name)
+{
+	int ret = 0;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	ion_phys_addr_t pa = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!cmnlib_name) {
+		pr_err("cmnlib_name is NULL\n");
+		return -EINVAL;
+	}
+	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
+		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
+			cmnlib_name, strlen(cmnlib_name));
+		return -EINVAL;
+	}
+
+	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
+		return -EIO;
+
+	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
+						&img_data, fw_size, &pa);
+	if (ret)
+		return -EIO;
+
+	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.phy_addr = (uint32_t)pa;
+		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
+	} else {
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		load_req_64bit.img_len = load_req.img_len;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
+				img_data, fw_size,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clk_vote;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+							&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed w/response result%d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	case  QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd failed err: %d\n", ret);
+			goto exit_disable_clk_vote;
+		}
+		break;
+	default:
+		pr_err("scm call return unknown response %d\n",	resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
+	return ret;
+}
+
+static int qseecom_unload_commonlib_image(void)
+{
+	int ret = -EINVAL;
+	struct qseecom_unload_lib_image_ireq unload_req = {0};
+	struct qseecom_command_scm_resp resp;
+
+	/* Populate the remaining parameters */
+	unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
+			sizeof(struct qseecom_unload_lib_image_ireq),
+						&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload lib failed : ret %d\n", ret);
+		ret = -EIO;
+	} else {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
+			break;
+		default:
+			pr_err("scm call return unknown response %d\n",
+					resp.result);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
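+/*
+ * Kernel-client entry point: allocates a handle plus a shared buffer of
+ * @size bytes, loads @app_name (or bumps the refcount if the app is
+ * already resident) and registers the handle so it can be reclaimed by
+ * qseecom_shutdown_app().
+ */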
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size)
+{
+	int32_t ret = 0;
+	unsigned long flags = 0;
+	struct qseecom_dev_handle *data = NULL;
+	struct qseecom_check_app_ireq app_ireq;
+	struct qseecom_registered_app_list *entry = NULL;
+	struct qseecom_registered_kclient_list *kclient_entry = NULL;
+	bool found_app = false;
+	size_t len;
+	ion_phys_addr_t pa;
+	uint32_t fw_size, app_arch;
+	uint32_t app_id = 0;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+	if (!app_name) {
+		pr_err("failed to get the app name\n");
+		return -EINVAL;
+	}
+
+	if (strlen(app_name) >= MAX_APP_NAME_SIZE) {
+		pr_err("The app_name (%s) with length %zu is not valid\n",
+			app_name, strlen(app_name));
+		return -EINVAL;
+	}
+
+	*handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
+	if (!(*handle))
+		return -ENOMEM;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		kfree(*handle);
+		*handle = NULL;
+		return -ENOMEM;
+	}
+	data->abort = 0;
+	data->type = QSEECOM_CLIENT_APP;
+	data->released = false;
+	data->client.sb_length = size;
+	data->client.user_virt_sb_base = 0;
+	data->client.ihandle = NULL;
+
+	init_waitqueue_head(&data->abort_wq);
+
+	data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
+				ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(data->client.ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		kfree(data);
+		kfree(*handle);
+		*handle = NULL;
+		return -EINVAL;
+	}
+	mutex_lock(&app_access_lock);
+
+	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
+	ret = __qseecom_check_app_exists(app_ireq, &app_id);
+	if (ret)
+		goto err;
+
+	strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
+	if (app_id) {
+		pr_warn("App id %d for [%s] app exists\n", app_id,
+			(char *)app_ireq.app_name);
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		if (!found_app)
+			pr_warn("App_id %d [%s] was loaded but not registered\n",
+					app_id, (char *)app_ireq.app_name);
+	} else {
+		/* load the app and get the app_id  */
+		pr_debug("%s: Loading app for the first time'\n",
+				qseecom.pdev->init_name);
+		ret = __qseecom_load_fw(data, app_name, &app_id);
+		if (ret < 0)
+			goto err;
+	}
+	data->client.app_id = app_id;
+	if (!found_app) {
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			pr_err("kmalloc for app entry failed\n");
+			ret =  -ENOMEM;
+			goto err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
+		if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
+			ret = -EIO;
+			kfree(entry);
+			goto err;
+		}
+		entry->app_arch = app_arch;
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto err;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							data->client.ihandle);
+	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+		pr_err("ION memory mapping for client shared buf failed\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
+	data->client.sb_phys = (phys_addr_t)pa;
+	(*handle)->dev = (void *)data;
+	(*handle)->sbuf = (unsigned char *)data->client.sb_virt;
+	(*handle)->sbuf_len = data->client.sb_length;
+
+	kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
+	if (!kclient_entry) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	kclient_entry->handle = *handle;
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_add_tail(&kclient_entry->list,
+			&qseecom.registered_kclient_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	mutex_unlock(&app_access_lock);
+	return 0;
+
+err:
+	if (!IS_ERR_OR_NULL(data->client.ihandle))
+		ion_free(qseecom.ion_clnt, data->client.ihandle);
+	kfree(data);
+	kfree(*handle);
+	*handle = NULL;
+	mutex_unlock(&app_access_lock);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
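+/*
+ * Kernel-client counterpart to qseecom_start_app(): unlinks the handle
+ * from the registered-kclient list, unloads the app, and frees the handle
+ * state only when the unload actually succeeded.
+ */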
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	int ret = -EINVAL;
+	struct qseecom_dev_handle *data;
+
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	unsigned long flags = 0;
+	bool found_handle = false;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if ((handle == NULL) || (*handle == NULL)) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = (struct qseecom_dev_handle *)((*handle)->dev);
+	mutex_lock(&app_access_lock);
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+				list) {
+		if (kclient->handle == (*handle)) {
+			list_del(&kclient->list);
+			found_handle = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+	if (!found_handle)
+		pr_err("Unable to find the handle, exiting\n");
+	else
+		ret = qseecom_unload_app(data, false);
+
+	mutex_unlock(&app_access_lock);
+	if (ret == 0) {
+		kzfree(data);
+		kzfree(*handle);
+		kzfree(kclient);
+		*handle = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
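+/*
+ * Sends a command to a trusted app on behalf of a kernel client. On targets
+ * where HLOS owns the crypto clock, the clock is voted on demand when
+ * nothing has enabled it yet, and the vote is dropped again once the scm
+ * call returns.
+ */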
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
+	struct qseecom_dev_handle *data;
+	bool perf_enabled = false;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if (handle == NULL) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = handle->dev;
+
+	req.cmd_req_len = sbuf_len;
+	req.resp_len = rbuf_len;
+	req.cmd_req_buf = send_buf;
+	req.resp_buf = resp_buf;
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+		if (ret) {
+			pr_err("Failed to set bw.\n");
+			mutex_unlock(&app_access_lock);
+			return ret;
+		}
+	}
+	/*
+	 * On targets where crypto clock is handled by HLOS,
+	 * if clk_access_cnt is zero and perf_enabled is false,
+	 * then the crypto clock was not enabled before sending cmd
+	 * to tz, qseecom will enable the clock to avoid service failure.
+	 */
+	if (!qseecom.no_clock_support &&
+		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+		pr_debug("ce clock is not enabled!\n");
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clock with err %d\n",
+						ret);
+			mutex_unlock(&app_access_lock);
+			return -EINVAL;
+		}
+		perf_enabled = true;
+	}
+	if (!strcmp(data->client.app_name, "securemm"))
+		data->use_legacy_cmd = true;
+
+	ret = __qseecom_send_cmd(data, &req);
+	data->use_legacy_cmd = false;
+	if (qseecom.support_bus_scaling)
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+
+	if (perf_enabled) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	}
+
+	mutex_unlock(&app_access_lock);
+
+	if (ret)
+		return ret;
+
+	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
+			req.resp_len, req.resp_buf);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_send_command);
+
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
+{
+	int ret = 0;
+
+	if ((handle == NULL) || (handle->dev == NULL)) {
+		pr_err("No valid kernel client\n");
+		return -EINVAL;
+	}
+	if (high) {
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(handle->dev,
+									HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(handle->dev);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		}
+	} else {
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(handle->dev);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_set_bandwidth);
+
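+/*
+ * Entry point used by the smcinvoke driver when a listener request arrives
+ * through its channel: repackage the scm return words as a qseecom response,
+ * run the normal reentrancy/listener processing, then hand the updated
+ * words back to the caller.
+ */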
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
+{
+	struct qseecom_registered_app_list dummy_app_entry = { {0} };
+	struct qseecom_dev_handle dummy_private_data = {0};
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	if (!desc) {
+		pr_err("desc is NULL\n");
+		return -EINVAL;
+	}
+
+	resp.result = desc->ret[0];	/* req_cmd */
+	resp.resp_type = desc->ret[1];	/* app_id */
+	resp.data = desc->ret[2];	/* listener_id */
+
+	dummy_private_data.client.app_id = desc->ret[1];
+	dummy_app_entry.app_id = desc->ret[1];
+
+	mutex_lock(&app_access_lock);
+	ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+					&dummy_private_data);
+	mutex_unlock(&app_access_lock);
+	if (ret)
+		pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
+			(int)desc->ret[0], (int)desc->ret[2],
+			(int)desc->ret[1], ret);
+	desc->ret[0] = resp.result;
+	desc->ret[1] = resp.resp_type;
+	desc->ret[2] = resp.data;
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
+
+static int qseecom_send_resp(void)
+{
+	qseecom.send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
+			struct qseecom_send_modfd_listener_resp *resp,
+			struct qseecom_registered_listener_list *this_lstnr)
+{
+	int i;
+
+	if (!data || !resp || !this_lstnr) {
+		pr_err("listener handle or resp msg is null\n");
+		return -EINVAL;
+	}
+
+	if (resp->resp_buf_ptr == NULL) {
+		pr_err("resp buffer is null\n");
+		return -EINVAL;
+	}
+	/* validate resp buf length */
+	if ((resp->resp_len == 0) ||
+			(resp->resp_len > this_lstnr->sb_length)) {
+		pr_err("resp buf length %d not valid\n", resp->resp_len);
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)this_lstnr->user_virt_sb_base >
+					(ULONG_MAX - this_lstnr->sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	/* validate resp buf */
+	if (((uintptr_t)resp->resp_buf_ptr <
+		(uintptr_t)this_lstnr->user_virt_sb_base) ||
+		((uintptr_t)resp->resp_buf_ptr >=
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+				this_lstnr->sb_length)) ||
+		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+						this_lstnr->sb_length))) {
+		pr_err("resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+				void __user *argp, bool is_64bit_addr)
+{
+	struct qseecom_send_modfd_listener_resp resp;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	if (copy_from_user(&resp, argp, sizeof(resp))) {
+		pr_err("copy_from_user failed");
+		return -EINVAL;
+	}
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+
+	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
+		return -EINVAL;
+
+	resp.resp_buf_ptr = this_lstnr->sb_virt +
+		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
+
+	if (!is_64bit_addr)
+		__qseecom_update_cmd_buf(&resp, false, data);
+	else
+		__qseecom_update_cmd_buf_64(&resp, false, data);
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, false);
+}
+
+static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, true);
+}
+
+static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	struct qseecom_qseos_version_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed");
+		return -EINVAL;
+	}
+	req.qseos_version = qseecom.qseos_version;
+	if (copy_to_user(argp, &req, sizeof(req))) {
+		pr_err("copy_to_user failed");
+		return -EINVAL;
+	}
+	return 0;
+}
+
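+/*
+ * Reference-counted enable of the CE core, interface and bus clocks for
+ * the chosen hardware instance; only the first caller actually prepares
+ * the clocks, and a partial failure unwinds in reverse order.
+ */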
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct qseecom_clk *qclk = NULL;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	if (ce == CLK_CE_DRV)
+		qclk = &qseecom.ce_drv;
+
+	if (qclk == NULL) {
+		pr_err("CLK type not supported\n");
+		return -EINVAL;
+	}
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == ULONG_MAX) {
+		pr_err("clk_access_cnt beyond limitation\n");
+		goto err;
+	}
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt++;
+		mutex_unlock(&clk_access_lock);
+		return rc;
+	}
+
+	/* Enable CE core clk */
+	if (qclk->ce_core_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto err;
+		}
+	}
+	/* Enable CE clk */
+	if (qclk->ce_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto ce_clk_err;
+		}
+	}
+	/* Enable AXI clk */
+	if (qclk->ce_bus_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE bus clk\n");
+			goto ce_bus_clk_err;
+		}
+	}
+	qclk->clk_access_cnt++;
+	mutex_unlock(&clk_access_lock);
+	return 0;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk != NULL)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk != NULL)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	return -EIO;
+}
+
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == 0) {
+		mutex_unlock(&clk_access_lock);
+		return;
+	}
+
+	if (qclk->clk_access_cnt == 1) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+	qclk->clk_access_cnt--;
+	mutex_unlock(&clk_access_lock);
+}
+
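+/*
+ * Refcounted DFAB/SFPB bandwidth voting through the msm bus scaling client.
+ * Vote level 3 is used when both clock types are active, 1 for DFAB alone
+ * and 2 for SFPB alone, matching the release path in
+ * qsee_disable_clock_vote().
+ */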
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	qclk = &qseecom.qsee;
+	if (!qseecom.qsee_perf_client)
+		return ret;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_bw_count) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count++;
+				data->perf_enabled = true;
+			}
+		} else {
+			qseecom.qsee_bw_count++;
+			data->perf_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_sfpb_bw_count) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 2);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+
+			if (ret)
+				pr_err("SFPB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count++;
+				data->fast_load_enabled = true;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count++;
+			data->fast_load_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+	return ret;
+}
+
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int32_t ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+
+	if (qseecom.no_clock_support)
+		return;
+	if (!qseecom.qsee_perf_client)
+		return;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_bw_count == 0) {
+			pr_err("Client error.Extra call to disable DFAB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+
+		if (qseecom.qsee_bw_count == 1) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 2);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count--;
+				data->perf_enabled = false;
+			}
+		} else {
+			qseecom.qsee_bw_count--;
+			data->perf_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_sfpb_bw_count == 0) {
+			pr_err("Client error.Extra call to disable SFPB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+		if (qseecom.qsee_sfpb_bw_count == 1) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count--;
+				data->fast_load_enabled = false;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count--;
+			data->fast_load_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+
+}
+
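+/*
+ * Loads a caller-supplied external ELF image: the image is passed in via
+ * an ion fd, its mdt_len/img_len are validated against the ion buffer
+ * length, and it is handed to TZ with the LOAD_EXTERNAL_ELF command under
+ * the usual clock and bandwidth votes.
+ */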
+static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int uret = 0;
+	int ret;
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Get the handle of the shared fd */
+	ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+				load_img_req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto exit_cpu_restore;
+	}
+	if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+		pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+				len, load_img_req.mdt_len,
+				load_img_req.img_len);
+		ret = -EINVAL;
+		goto exit_cpu_restore;
+	}
+	/* Populate the structure for sending scm call to load image */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req.mdt_len = load_img_req.mdt_len;
+		load_req.img_len = load_img_req.img_len;
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req_64bit.mdt_len = load_img_req.mdt_len;
+		load_req_64bit.img_len = load_img_req.img_len;
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_cpu_restore;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_register_bus_bandwidth_needs;
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clock;
+	}
+	/*  SCM_CALL to load the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto exit_disable_clock;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_err("%s: qseos result incomplete\n", __func__);
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd failed: err: %d\n", ret);
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+		ret = -EFAULT;
+		break;
+	default:
+		pr_err("scm_call response result %d not supported\n",
+							resp.result);
+		ret = -EFAULT;
+		break;
+	}
+
+exit_disable_clock:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_register_bus_bandwidth_needs:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		uret = qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+		if (uret)
+			pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
+								uret, ret);
+	}
+
+exit_cpu_restore:
+	/* Deallocate the handle */
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return ret;
+}
+
+static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_unload_app_ireq req;
+
+	/* unavailable client app */
+	data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+
+	/* Populate the structure for sending scm call to unload image */
+	req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+	/* SCM_CALL to unload the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto qseecom_unload_external_elf_scm_err;
+	}
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd fail err: %d\n",
+					ret);
+	} else {
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call to unload image failed resp.result =%d\n",
+						resp.result);
+			ret = -EFAULT;
+		}
+	}
+
+qseecom_unload_external_elf_scm_err:
+
+	return ret;
+}
+
+static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int32_t ret;
+	struct qseecom_qseos_app_load_query query_req;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	uint32_t app_arch = 0, app_id = 0;
+	bool found_app = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&query_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_qseos_app_load_query))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret) {
+		pr_err(" scm call to check if app is loaded failed");
+		return ret;	/* scm call failed */
+	}
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				app_arch = entry->app_arch;
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		data->client.app_id = app_id;
+		query_req.app_id = app_id;
+		if (app_arch) {
+			data->client.app_arch = app_arch;
+			query_req.app_arch = app_arch;
+		} else {
+			data->client.app_arch = 0;
+			query_req.app_arch = 0;
+		}
+		strlcpy(data->client.app_name, query_req.app_name,
+				MAX_APP_NAME_SIZE);
+		/*
+		 * If app was loaded by appsbl before and was not registered,
+		 * register it now.
+		 */
+		if (!found_app) {
+			pr_debug("Register app %d [%s] which was loaded before\n",
+					app_id, (char *)query_req.app_name);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry) {
+				pr_err("kmalloc for app entry failed\n");
+				return  -ENOMEM;
+			}
+			entry->app_id = app_id;
+			entry->ref_cnt = 1;
+			entry->app_arch = data->client.app_arch;
+			strlcpy(entry->app_name, data->client.app_name,
+				MAX_APP_NAME_SIZE);
+			entry->app_blocked = false;
+			entry->blocked_on_listener_id = 0;
+			spin_lock_irqsave(&qseecom.registered_app_list_lock,
+				flags);
+			list_add_tail(&entry->list,
+				&qseecom.registered_app_list_head);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		}
+		if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+			pr_err("copy_to_user failed\n");
+			return -EFAULT;
+		}
+		return -EEXIST;	/* app already loaded */
+	} else {
+		return 0;	/* app not loaded */
+	}
+}
+
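+/*
+ * Looks up the crypto-engine numbers and pipe pair for the FDE or PFE
+ * info set whose unit_num matches @unit, copying one ce_num per pipe
+ * entry into the caller's ce_hw array.
+ */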
+static int __qseecom_get_ce_pipe_info(
+			enum qseecom_key_management_usage_type usage,
+			uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
+{
+	int ret = -EINVAL;
+	int i, j;
+	struct qseecom_ce_info_use *p = NULL;
+	int total = 0;
+	struct qseecom_ce_pipe_entry *pcepipe;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	for (j = 0; j < total; j++) {
+		if (p->unit_num == unit) {
+			pcepipe =  p->ce_pipe_entry;
+			for (i = 0; i < p->num_ce_pipe_entries; i++) {
+				(*ce_hw)[i] = pcepipe->ce_num;
+				*pipe = pcepipe->ce_pipe_pair;
+				pcepipe++;
+			}
+			ret = 0;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
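+/*
+ * Asks TZ to generate and store a key for the given usage. An existing
+ * key id is treated as success so repeated generate requests for the same
+ * partition are idempotent.
+ */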
+static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_generate_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_generate_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+			pr_debug("Key ID exists.\n");
+			ret = 0;
+		} else {
+			pr_err("scm call to generate key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto generate_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
+		pr_debug("Key ID exists.\n");
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+				pr_debug("Key ID exists.\n");
+				ret = 0;
+			} else {
+				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("gen key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+generate_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_delete_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_delete_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else {
+			pr_err("scm call to delete key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto del_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Delete key scm call failed resp.result %d\n",
+							resp.result);
+		ret = -EINVAL;
+		break;
+	}
+del_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
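+/*
+ * Selects or clears the pipe key for the given usage. Both the QSEE clock
+ * and, when it is a distinct instance, the CE driver clock are enabled
+ * around the scm call; a pending-operation result is passed back so
+ * callers can retry.
+ */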
+static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_select_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+		ret = __qseecom_enable_clk(CLK_CE_DRV);
+		if (ret) {
+			__qseecom_disable_clk(CLK_QSEE);
+			return ret;
+		}
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_select_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
+				ret);
+			ret = -EFAULT;
+		}
+		goto set_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result ==
+				QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+				pr_debug("Set Key operation under processing...\n");
+				ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+			}
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Set Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+set_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+		__qseecom_disable_clk(CLK_CE_DRV);
+	return ret;
+}
+
+static int __qseecom_update_current_key_user_info(
+			struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_userinfo_update_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+				usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
+		&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to update key userinfo failed: %d\n",
+									ret);
+			__qseecom_disable_clk(CLK_QSEE);
+			return -EFAULT;
+		}
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (resp.result ==
+			QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		}
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Update Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int qseecom_enable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", true);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", true);
+
+	return ret;
+}
+
+static int qseecom_disable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", false);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", false);
+
+	return ret;
+}
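+
+/*
+ * Key-programming bracket used by qseecom_create_key() below (a sketch
+ * only; error handling elided). The ICE setup must wrap the SCM
+ * set-key call, and the caller retries while TZ reports a pending
+ * operation; qseecom_wipe_key() uses the same bracket without the
+ * retry loop:
+ *
+ *	if (qseecom_enable_ice_setup(usage))
+ *		goto out;
+ *	do {
+ *		ret = __qseecom_set_clear_ce_key(data, usage, &ireq);
+ *		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+ *			msleep(50);
+ *	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+ *	qseecom_disable_ice_setup(usage);
+ */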
+
+static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
+{
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	int i;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		p = qseecom.ce_info.fde;
+		total = qseecom.ce_info.num_fde;
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		p = qseecom.ce_info.pfe;
+		total = qseecom.ce_info.num_pfe;
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+
+	for (i = 0; i < total; i++) {
+		if (p->unit_num == unit) {
+			pce_info_use = p;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use) {
+		pr_err("can not find %d\n", unit);
+		return -EINVAL;
+	}
+	return pce_info_use->num_ce_pipe_entries;
+}
+
+static int qseecom_create_key(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int i;
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_create_key_req create_key_req;
+	struct qseecom_key_generate_ireq generate_key_ireq;
+	struct qseecom_key_select_ireq set_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", create_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					create_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			DEFAULT_CE_INFO_UNIT, create_key_req.usage);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
+			DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	generate_key_ireq.flags = flags;
+	generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+	memset((void *)generate_key_ireq.key_id,
+			0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)generate_key_ireq.hash32,
+			0, QSEECOM_HASH_SIZE);
+	memcpy((void *)generate_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)generate_key_ireq.hash32,
+			(void *)create_key_req.hash32,
+			QSEECOM_HASH_SIZE);
+
+	ret = __qseecom_generate_and_save_key(data,
+			create_key_req.usage, &generate_key_ireq);
+	if (ret) {
+		pr_err("Failed to generate key on storage: %d\n", ret);
+		goto free_buf;
+	}
+
+	for (i = 0; i < entries; i++) {
+		set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else if (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else {
+			set_key_ireq.ce = ce_hw[i];
+			set_key_ireq.pipe = pipe;
+		}
+		set_key_ireq.flags = flags;
+
+		/* set both PIPE_ENC and PIPE_ENC_XTS*/
+		set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+		memcpy((void *)set_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)set_key_ireq.hash32,
+				(void *)create_key_req.hash32,
+				QSEECOM_HASH_SIZE);
+		/*
+		 * qseecom_enable_ice_setup() returns 0 when this is a
+		 * GPCE-based crypto instance or when ICE is set up
+		 * properly; non-zero means ICE setup failed.
+		 */
+		if (qseecom_enable_ice_setup(create_key_req.usage)) {
+			ret = -EINVAL;
+			goto free_buf;
+		}
+
+		do {
+			ret = __qseecom_set_clear_ce_key(data,
+					create_key_req.usage,
+					&set_key_ireq);
+			/*
+			 * wait a little before calling scm again to let other
+			 * processes run
+			 */
+			if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+				msleep(50);
+
+		} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+
+		qseecom_disable_ice_setup(create_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[i], ret);
+			goto free_buf;
+		} else {
+			pr_err("Set the key successfully\n");
+			if ((create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
+			     (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
+				goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
+static int qseecom_wipe_key(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	int i, j;
+	struct qseecom_wipe_key_req wipe_key_req;
+	struct qseecom_key_delete_ireq delete_key_ireq;
+	struct qseecom_key_select_ireq clear_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", wipe_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					wipe_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
+				DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (wipe_key_req.wipe_key_flag) {
+		delete_key_ireq.flags = flags;
+		delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
+		memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)delete_key_ireq.key_id,
+			(void *)key_id_array[wipe_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
+					&delete_key_ireq);
+		if (ret) {
+			pr_err("Failed to delete key from ssd storage: %d\n",
+				ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+	for (j = 0; j < entries; j++) {
+		clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (wipe_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else if (wipe_key_req.usage ==
+			QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else {
+			clear_key_ireq.ce = ce_hw[j];
+			clear_key_ireq.pipe = pipe;
+		}
+		clear_key_ireq.flags = flags;
+		clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
+			clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
+		memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		/*
+		 * qseecom_enable_ice_setup() returns 0 when this is a
+		 * GPCE-based crypto instance or when ICE is set up
+		 * properly; non-zero means ICE setup failed.
+		 */
+		if (qseecom_enable_ice_setup(wipe_key_req.usage)) {
+			ret = -EINVAL;
+			goto free_buf;
+		}
+
+		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
+					&clear_key_ireq);
+
+		qseecom_disable_ice_setup(wipe_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[j], ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
+static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_update_key_userinfo_req update_key_req;
+	struct qseecom_key_userinfo_update_ireq ireq;
+
+	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
+		return -EFAULT;
+	}
+
+	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	ireq.flags = flags;
+	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
+	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.key_id,
+		(void *)key_id_array[update_key_req.usage].desc,
+		QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)ireq.current_hash32,
+		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.new_hash32,
+		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
+
+	do {
+		ret = __qseecom_update_current_key_user_info(data,
+						update_key_req.usage,
+						&ireq);
+		/*
+		 * wait a little before calling scm again to let other
+		 * processes run
+		 */
+		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+			msleep(50);
+
+	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+	if (ret)
+		pr_err("Failed to update key info: %d\n", ret);
+	return ret;
+}
+
+static int qseecom_is_es_activated(void __user *argp)
+{
+	struct qseecom_is_es_activated_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
+		&req, sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call failed\n");
+		return ret;
+	}
+
+	req.is_activated = resp.result;
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		pr_err("copy_to_user failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+	struct qseecom_save_partition_hash_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	memset(&resp, 0x00, sizeof(resp));
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+		       (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qseecom_mdtp_cipher_dip(void __user *argp)
+{
+	struct qseecom_mdtp_cipher_dip_req req;
+	u32 tzbuflenin, tzbuflenout;
+	char *tzbufin = NULL, *tzbufout = NULL;
+	struct scm_desc desc = {0};
+	int ret;
+
+	do {
+		/* Copy the parameters from userspace */
+		if (argp == NULL) {
+			pr_err("arg is null\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = copy_from_user(&req, argp, sizeof(req));
+		if (ret) {
+			pr_err("copy_from_user failed, ret= %d\n", ret);
+			break;
+		}
+
+		if (req.in_buf == NULL || req.out_buf == NULL ||
+			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
+				req.direction > 1) {
+			pr_err("invalid parameters\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* Copy the input buffer from userspace to kernel space */
+		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
+		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
+		if (!tzbufin) {
+			pr_err("error allocating in buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
+		if (ret) {
+			pr_err("copy_from_user failed, ret=%d\n", ret);
+			break;
+		}
+
+		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
+
+		/* Prepare the output buffer in kernel space */
+		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
+		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
+		if (!tzbufout) {
+			pr_err("error allocating out buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+
+		/* Send the command to TZ */
+		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
+		desc.args[0] = virt_to_phys(tzbufin);
+		desc.args[1] = req.in_buf_size;
+		desc.args[2] = virt_to_phys(tzbufout);
+		desc.args[3] = req.out_buf_size;
+		desc.args[4] = req.direction;
+
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			break;
+
+		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
+
+		__qseecom_disable_clk(CLK_QSEE);
+
+		if (ret) {
+			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
+				ret);
+			break;
+		}
+
+		/* Copy the output buffer from kernel space to userspace */
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
+		if (ret) {
+			pr_err("copy_to_user failed, ret=%d\n", ret);
+			break;
+		}
+	} while (0);
+
+	kzfree(tzbufin);
+	kzfree(tzbufout);
+
+	return ret;
+}
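+
+/*
+ * MDTP DIP cipher data flow above: the user in_buf is copied into a
+ * page-aligned kernel bounce buffer and cache-flushed, scm_call2() is
+ * invoked with the physical addresses of both bounce buffers, then the
+ * output bounce buffer (flushed before and after the call) is copied
+ * back to userspace. Both bounce buffers are zeroed and freed with
+ * kzfree().
+ */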
+
+static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req)
+{
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->type != QSEECOM_CLIENT_APP)
+		return -EFAULT;
+
+	if (req->req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if (req->req_len + req->resp_len > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+		(req->req_len + req->resp_len), data->client.sb_length);
+		return -ENOMEM;
+	}
+
+	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->req_ptr <
+			data->client.user_virt_sb_base) ||
+		((uintptr_t)req->req_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->resp_ptr <
+			data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if ((req->req_len == 0) || (req->resp_len == 0)) {
+		pr_err("cmd buf lengtgh/response buf length not valid\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
+		pr_err("Integer overflow in req_len & req_ptr\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_ptr\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->req_ptr + req->req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_ptr + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
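+
+/*
+ * Net effect of the validation above, with every addition guarded
+ * against integer overflow first:
+ *
+ *	sb_base <= req_ptr  && req_ptr  + req_len  <= sb_base + sb_length
+ *	sb_base <= resp_ptr && resp_ptr + resp_len <= sb_base + sb_length
+ *
+ * where sb_base/sb_length describe the client's shared buffer and both
+ * lengths must be non-zero.
+ */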
+
+static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
+				uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry *sg_entry;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	/*
+	 * Allocate a buffer, populate it with number of entry plus
+	 * each sg entry's phy addr and length; then return the
+	 * phy_addr of the buffer.
+	 */
+	size = sizeof(uint32_t) +
+		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.pdev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	*(uint32_t *)buf = sg_ptr->nents;
+	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+	return 0;
+}
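+
+/*
+ * Layout of the coherent buffer built by the helper above (size rounded
+ * up to a page; its physical address is what gets patched into the
+ * request buffer):
+ *
+ *	buf + 0:                 uint32_t nents
+ *	buf + sizeof(uint32_t):  struct qseecom_sg_entry[0] {phys_addr, len}
+ *	...                      one entry per scatterlist element
+ */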
+
+static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
+			struct qseecom_dev_handle *data, bool cleanup)
+{
+	struct ion_handle *ihandle;
+	int ret = 0;
+	int i = 0;
+	uint32_t *update;
+	struct sg_table *sg_ptr = NULL;
+	struct scatterlist *sg;
+	struct qseecom_param_memref *memref;
+
+	if (req == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd > 0) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			if ((req->req_len < sizeof(uint32_t)) ||
+				(req->ifd_data[i].cmd_buf_offset >
+				req->req_len - sizeof(uint32_t))) {
+				pr_err("Invalid offset/req len 0x%x/0x%x\n",
+					req->req_len,
+					req->ifd_data[i].cmd_buf_offset);
+				ion_free(qseecom.ion_clnt, ihandle);
+				return -EINVAL;
+			}
+			update = (uint32_t *)((char *) req->req_ptr +
+				req->ifd_data[i].cmd_buf_offset);
+			if (!update) {
+				pr_err("update pointer is NULL\n");
+				ion_free(qseecom.ion_clnt, ihandle);
+				return -EINVAL;
+			}
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("IOn client could not retrieve sg table\n");
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg == NULL) {
+			pr_err("sg is NULL\n");
+			goto err;
+		}
+		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
+			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
+					sg_ptr->nents, sg->length);
+			goto err;
+		}
+		/* clean up buf for pre-allocated fd */
+		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			(*update)) {
+			if (data->client.sec_buf_fd[i].vbase)
+				dma_free_coherent(qseecom.pdev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			memset((void *)update, 0,
+				sizeof(struct qseecom_param_memref));
+			memset(&(data->client.sec_buf_fd[i]), 0,
+				sizeof(struct qseecom_sec_buf_fd_info));
+			goto clean;
+		}
+
+		if (*update == 0) {
+			/* update buf for pre-allocated fd from secure heap*/
+			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
+				sg_ptr);
+			if (ret) {
+				pr_err("Failed to handle buf for fd[%d]\n", i);
+				goto err;
+			}
+			memref = (struct qseecom_param_memref *)update;
+			memref->buffer =
+				(uint32_t)(data->client.sec_buf_fd[i].pbase);
+			memref->size =
+				(uint32_t)(data->client.sec_buf_fd[i].size);
+		} else {
+			/* update buf for fd from non-secure qseecom heap */
+			if (sg_ptr->nents != 1) {
+				pr_err("Num of scat entr (%d) invalid\n",
+					sg_ptr->nents);
+				goto err;
+			}
+			if (cleanup)
+				*update = 0;
+			else
+				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
+		}
+clean:
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+				ihandle, NULL, sg->length,
+				ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+				ihandle, NULL, sg->length,
+				ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			data->sglistinfo_ptr[i].indexAndFlags =
+				SGLISTINFO_SET_INDEX_FLAG(
+				(sg_ptr->nents == 1), 0,
+				req->ifd_data[i].cmd_buf_offset);
+			data->sglistinfo_ptr[i].sizeOrCount =
+				(sg_ptr->nents == 1) ?
+				sg->length : sg_ptr->nents;
+			data->sglist_cnt = i + 1;
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
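+
+/*
+ * __qseecom_update_qteec_req_buf() runs twice per modfd command: once
+ * with cleanup == false before the SCM call, patching each ion fd
+ * reference in the request buffer with a physical address (or with a
+ * freshly built sg-entry buffer for pre-allocated secure-heap fds), and
+ * once with cleanup == true afterwards, zeroing those references and
+ * freeing the pre-allocated buffers.
+ */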
+
+static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req, uint32_t cmd_id)
+{
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+
+	ret  = __qseecom_qteec_validate_msg(data, req);
+	if (ret)
+		return ret;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, false);
+		if (ret)
+			return ret;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->req_ptr);
+		ireq.req_len = req->req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->resp_ptr);
+		ireq.resp_len = req->resp_len;
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->req_ptr);
+		ireq_64bit.req_len = req->req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->resp_ptr);
+		ireq_64bit.resp_len = req->resp_len;
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((ireq_64bit.req_ptr >=
+				PHY_ADDR_4G - ireq_64bit.req_len) ||
+			(ireq_64bit.resp_ptr >=
+				PHY_ADDR_4G - ireq_64bit.resp_len))){
+			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
+				data->client.app_name, data->client.app_id);
+			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
+				ireq_64bit.req_ptr, ireq_64bit.req_len,
+				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
+			return -EFAULT;
+		}
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+	}
+	if (qseecom.whitelist_support &&
+		cmd_id == QSEOS_TEE_OPEN_SESSION)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = cmd_id;
+
+	reqd_len_sb_in = req->req_len + req->resp_len;
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, true);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+							QSEOS_TEE_OPEN_SESSION);
+
+	return ret;
+}
+
+static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
+	return ret;
+}
+
+static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int i = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret = copy_from_user(&req, argp,
+			sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_validate_msg(data,
+					(struct qseecom_qteec_req *)(&req));
+	if (ret)
+		return ret;
+	req_ptr = req.req_ptr;
+	resp_ptr = req.resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].fd) {
+			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
+				return -EINVAL;
+		}
+	}
+	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.req_ptr);
+	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_ptr);
+	ret = __qseecom_update_qteec_req_buf(&req, data, false);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req.req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req.req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	}
+	reqd_len_sb_in = req.req_len + req.resp_len;
+	if (qseecom.whitelist_support)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = __qseecom_update_qteec_req_buf(&req, data, true);
+	if (ret)
+		return ret;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+						QSEOS_TEE_REQUEST_CANCELLATION);
+
+	return ret;
+}
+
+static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
+{
+	if (data->sglist_cnt) {
+		memset(data->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		data->sglist_cnt = 0;
+	}
+}
+
+static long qseecom_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data = file->private_data;
+	void __user *argp = (void __user *) arg;
+	bool perf_enabled = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (data->abort) {
+		pr_err("Aborting qseecom driver\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("reg lstnr req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl register_listener_req()\n");
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		data->type = QSEECOM_LISTENER_SERVICE;
+		ret = qseecom_register_listener(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_register_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl unregister_listener_req()\n");
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unregister_listener(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_unregister_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			/* register bus bw in case the client doesn't do it */
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				ret = -EINVAL;
+				mutex_unlock(&app_access_lock);
+				break;
+			}
+		}
+		/*
+		 * On targets where crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * then the crypto clock was not enabled before sending cmd to
+		 * tz, qseecom will enable the clock to avoid service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_cmd(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+		}
+		/*
+		 * On targets where crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * then the crypto clock was not enabled before sending cmd to
+		 * tz, qseecom will enable the clock to avoid service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
+			ret = qseecom_send_modfd_cmd(data, argp);
+		else
+			ret = qseecom_send_modfd_cmd_64(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_IOCTL_RECEIVE_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_receive_req(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret && (ret != -ERESTARTSYS))
+			pr_err("failed qseecom_receive_req: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_RESP_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.qsee_reentrancy_support)
+			ret = qseecom_send_resp();
+		else
+			ret = qseecom_reentrancy_send_resp(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_send_resp: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		if ((data->type != QSEECOM_CLIENT_APP) &&
+			(data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_SECURE_SERVICE)) {
+			pr_err("set mem param req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_set_client_mem_param(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Qqseecom_set_mem_param request: %d\n",
+								ret);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_APP_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("load app req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_CLIENT_APP;
+		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_app(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_app(data, false);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_qseos_version(data, argp);
+		if (ret)
+			pr_err("qseecom_get_qseos_version: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf enable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(data, HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Fail to vote for clocks %d\n", ret);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
+		if ((data->type != QSEECOM_SECURE_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf disable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(data);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+
+	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		/* If crypto clock is not handled by HLOS, return directly. */
+		if (qseecom.no_clock_support) {
+			pr_debug("crypto clock is not handled by HLOS\n");
+			break;
+		}
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_scale_bus_bandwidth(data, argp);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("load ext elf req: invalid client handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_external_elf(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_external_elf request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
+			pr_err("unload ext elf req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_external_elf(data);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		data->type = QSEECOM_CLIENT_APP;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
+		ret = qseecom_query_app_loaded(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("send cmd svc req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_SECURE_SERVICE;
+		if (qseecom.qsee_version < QSEE_VERSION_03) {
+			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_service_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("create key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Create Key feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_create_key(data, argp);
+		if (ret)
+			pr_err("failed to create encryption key: %d\n", ret);
+
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("wipe key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_wipe_key(data, argp);
+		if (ret)
+			pr_err("failed to wipe encryption key: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("update key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Update Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_update_key_user_info(data, argp);
+		if (ret)
+			pr_err("failed to update key user info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("save part hash req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_save_partition_hash(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("ES activated req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_is_es_activated(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_mdtp_cipher_dip(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
+			ret = qseecom_send_modfd_resp(data, argp);
+		else
+			ret = qseecom_send_modfd_resp_64(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Open session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_open_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed open_session_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Close session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_close_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed close_session_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Invoke cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_request_cancellation(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed request_cancellation: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_free_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_query_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	default:
+		pr_err("Invalid IOCTL: 0x%x\n", cmd);
+		return -EINVAL;
+	}
+	return ret;
+}
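+
+/*
+ * Concurrency pattern used throughout qseecom_ioctl() above:
+ * app_access_lock serializes access to TZ so only one client command is
+ * in flight at a time, while the ioctl_count/abort_wq pair lets the
+ * abort path wait for in-flight ioctls to drain before a handle is
+ * torn down.
+ */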
+
+static int qseecom_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	file->private_data = data;
+	data->abort = 0;
+	data->type = QSEECOM_GENERIC;
+	data->released = false;
+	memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+	data->mode = INACTIVE;
+	init_waitqueue_head(&data->abort_wq);
+	atomic_set(&data->ioctl_count, 0);
+	return ret;
+}
+
+static int qseecom_release(struct inode *inode, struct file *file)
+{
+	struct qseecom_dev_handle *data = file->private_data;
+	int ret = 0;
+
+	if (!data->released) {
+		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
+			data->type, data->mode, data);
+		switch (data->type) {
+		case QSEECOM_LISTENER_SERVICE:
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unregister_listener(data);
+			mutex_unlock(&app_access_lock);
+			break;
+		case QSEECOM_CLIENT_APP:
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unload_app(data, true);
+			mutex_unlock(&app_access_lock);
+			break;
+		case QSEECOM_SECURE_SERVICE:
+		case QSEECOM_GENERIC:
+			ret = qseecom_unmap_ion_allocated_memory(data);
+			if (ret)
+				pr_err("Ion Unmap failed\n");
+			break;
+		case QSEECOM_UNAVAILABLE_CLIENT_APP:
+			break;
+		default:
+			pr_err("Unsupported clnt_handle_type %d",
+				data->type);
+			break;
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		if (data->mode != INACTIVE) {
+			qseecom_unregister_bus_bandwidth_needs(data);
+			if (qseecom.cumulative_mode == INACTIVE) {
+				ret = __qseecom_set_msm_bus_request(INACTIVE);
+				if (ret)
+					pr_err("Fail to scale down bus\n");
+			}
+		}
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		if (data->fast_load_enabled == true)
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		if (data->perf_enabled == true)
+			qsee_disable_clock_vote(data, CLK_DFAB);
+	}
+	kfree(data);
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+#include "compat_qseecom.c"
+#else
+#define compat_qseecom_ioctl	NULL
+#endif
+
+static const struct file_operations qseecom_fops = {
+		.owner = THIS_MODULE,
+		.unlocked_ioctl = qseecom_ioctl,
+		.compat_ioctl = compat_qseecom_ioctl,
+		.open = qseecom_open,
+		.release = qseecom_release
+};
+
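+/*
+ * Look up the crypto engine clocks for the requested HW instance. The
+ * core source clock is optional (left NULL with a warning on failure);
+ * the core, interface and bus clocks are mandatory, and any failure
+ * drops the references taken so far and returns -EIO.
+ */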
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct device *pdev;
+	struct qseecom_clk *qclk;
+	char *core_clk_src = NULL;
+	char *core_clk = NULL;
+	char *iface_clk = NULL;
+	char *bus_clk = NULL;
+
+	switch (ce) {
+	case CLK_QSEE: {
+		core_clk_src = "core_clk_src";
+		core_clk = "core_clk";
+		iface_clk = "iface_clk";
+		bus_clk = "bus_clk";
+		qclk = &qseecom.qsee;
+		qclk->instance = CLK_QSEE;
+		break;
+	}
+	case CLK_CE_DRV: {
+		core_clk_src = "ce_drv_core_clk_src";
+		core_clk = "ce_drv_core_clk";
+		iface_clk = "ce_drv_iface_clk";
+		bus_clk = "ce_drv_bus_clk";
+		qclk = &qseecom.ce_drv;
+		qclk->instance = CLK_CE_DRV;
+		break;
+	}
+	default:
+		pr_err("Invalid ce hw instance: %d!\n", ce);
+		return -EIO;
+	}
+
+	if (qseecom.no_clock_support) {
+		qclk->ce_core_clk = NULL;
+		qclk->ce_clk = NULL;
+		qclk->ce_bus_clk = NULL;
+		qclk->ce_core_src_clk = NULL;
+		return 0;
+	}
+
+	pdev = qseecom.pdev;
+
+	/* Get CE3 src core clk. */
+	qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
+	if (!IS_ERR(qclk->ce_core_src_clk)) {
+		rc = clk_set_rate(qclk->ce_core_src_clk,
+					qseecom.ce_opp_freq_hz);
+		if (rc) {
+			clk_put(qclk->ce_core_src_clk);
+			qclk->ce_core_src_clk = NULL;
+			pr_err("Unable to set the core src clk @%uMhz.\n",
+				qseecom.ce_opp_freq_hz/CE_CLK_DIV);
+			return -EIO;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		qclk->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	qclk->ce_core_clk = clk_get(pdev, core_clk);
+	if (IS_ERR(qclk->ce_core_clk)) {
+		rc = PTR_ERR(qclk->ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		return -EIO;
+	}
+
+	/* Get CE Interface clk */
+	qclk->ce_clk = clk_get(pdev, iface_clk);
+	if (IS_ERR(qclk->ce_clk)) {
+		rc = PTR_ERR(qclk->ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		return -EIO;
+	}
+
+	/* Get CE AXI clk */
+	qclk->ce_bus_clk = clk_get(pdev, bus_clk);
+	if (IS_ERR(qclk->ce_bus_clk)) {
+		rc = PTR_ERR(qclk->ce_bus_clk);
+		pr_err("Unable to get CE BUS interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		clk_put(qclk->ce_clk);
+		return -EIO;
+	}
+
+	return rc;
+}
+
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->ce_clk != NULL) {
+		clk_put(qclk->ce_clk);
+		qclk->ce_clk = NULL;
+	}
+	if (qclk->ce_core_clk != NULL) {
+		clk_put(qclk->ce_core_clk);
+		qclk->ce_core_clk = NULL;
+	}
+	if (qclk->ce_bus_clk != NULL) {
+		clk_put(qclk->ce_bus_clk);
+		qclk->ce_bus_clk = NULL;
+	}
+	if (qclk->ce_core_src_clk != NULL) {
+		clk_put(qclk->ce_core_src_clk);
+		qclk->ce_core_src_clk = NULL;
+	}
+	qclk->instance = CLK_INVALID;
+}
+
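+/*
+ * Parse crypto engine pipe configuration from device tree. New-style
+ * tables ("qcom,full-disk-encrypt-info"/"qcom,per-file-encrypt-info")
+ * are grouped into per-unit pipe entries; if neither table is present,
+ * fall back to the legacy single-unit properties
+ * ("qcom,disk-encrypt-pipe-pair"/"qcom,file-encrypt-pipe-pair").
+ */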
+static int qseecom_retrieve_ce_data(struct platform_device *pdev)
+{
+	int rc = 0;
+	uint32_t hlos_num_ce_hw_instances;
+	uint32_t disk_encrypt_pipe;
+	uint32_t file_encrypt_pipe;
+	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
+	int i;
+	const int *tbl;
+	int size;
+	int entry;
+	struct qseecom_crypto_info *pfde_tbl = NULL;
+	struct qseecom_crypto_info *p;
+	int tbl_size;
+	int j;
+	bool old_db = true;
+	struct qseecom_ce_info_use *pce_info_use;
+	uint32_t *unit_tbl = NULL;
+	int total_units = 0;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
+	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,qsee-ce-hw-instance",
+				&qseecom.ce_info.qsee_ce_hw_instance)) {
+		pr_err("Fail to get qsee ce hw instance information.\n");
+		rc = -EINVAL;
+		goto out;
+	} else {
+		pr_debug("qsee-ce-hw-instance=0x%x\n",
+			qseecom.ce_info.qsee_ce_hw_instance);
+	}
+
+	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-fde");
+	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-pfe");
+
+	if (!qseecom.support_pfe && !qseecom.support_fde) {
+		pr_warn("Device does not support PFE/FDE");
+		goto out;
+	}
+
+	if (qseecom.support_fde)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("full-disk-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+
+		if (!pfde_tbl || !unit_tbl) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read full-disk-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_fde = total_units;
+		pce_info_use = qseecom.ce_info.fde = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				pr_err("failed to alloc memory\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (qseecom.support_pfe)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("per-file-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+		if (!pfde_tbl || !unit_tbl) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read per-file-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_pfe = total_units;
+		pce_info_use = qseecom.ce_info.pfe = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				pr_err("failed to alloc memory\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (!old_db)
+		goto out1;
+
+	if (of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,support-multiple-ce-hw-instance")) {
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,hlos-num-ce-hw-instances",
+				&hlos_num_ce_hw_instances)) {
+			pr_err("Fail: get hlos number of ce hw instance\n");
+			rc = -EINVAL;
+			goto out;
+		}
+	} else {
+		hlos_num_ce_hw_instances = 1;
+	}
+
+	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
+		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
+			MAX_CE_PIPE_PAIR_PER_UNIT);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
+			hlos_num_ce_hw_instances)) {
+		pr_err("Fail: get hlos ce hw instance info\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (qseecom.support_fde) {
+		pce_info_use = qseecom.ce_info.fde =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+		pce_info_use->ce_pipe_entry = NULL;
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,disk-encrypt-pipe-pair",
+				&disk_encrypt_pipe)) {
+			pr_err("Fail to get FDE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("disk-encrypt-pipe-pair=0x%x",
+				disk_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+				hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support FDE");
+		disk_encrypt_pipe = 0xff;
+	}
+	if (qseecom.support_pfe) {
+		pce_info_use = qseecom.ce_info.pfe =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+		pce_info_use->ce_pipe_entry = NULL;
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,file-encrypt-pipe-pair",
+				&file_encrypt_pipe)) {
+			pr_err("Fail to get PFE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("file-encrypt-pipe-pair=0x%x",
+				file_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+						hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = file_encrypt_pipe;
+			pce_entry->valid = 1;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support PFE");
+		file_encrypt_pipe = 0xff;
+	}
+
+out1:
+	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
+out:
+	if (rc) {
+		if (qseecom.ce_info.fde) {
+			pce_info_use = qseecom.ce_info.fde;
+			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.fde);
+		qseecom.ce_info.fde = NULL;
+		if (qseecom.ce_info.pfe) {
+			pce_info_use = qseecom.ce_info.pfe;
+			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.pfe);
+		qseecom.ce_info.pfe = NULL;
+	}
+	kfree(unit_tbl);
+	kfree(pfde_tbl);
+	return rc;
+}
+
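+/*
+ * Hand out a CE info unit to the caller: reuse the unit already bound
+ * to the caller's handle if one exists, otherwise claim a free unit and
+ * bind it. qseecom_free_ce_info() releases the binding and
+ * qseecom_query_ce_info() looks it up without claiming anything.
+ */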
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	for (i = 0; i < total; i++) {
+		if (!p->alloc)
+			pce_info_use = p;
+		else if (!memcmp(p->handle, pinfo->handle,
+						MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+
+	if (pce_info_use == NULL)
+		return -EBUSY;
+
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (!pce_info_use->alloc) {
+		pce_info_use->alloc = true;
+		memcpy(pce_info_use->handle,
+			pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
+	}
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	struct qseecom_ce_info_use *p;
+	int total = 0;
+	int i;
+	bool found = false;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return ret;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < total; i++) {
+		if (p->alloc &&
+			!memcmp(p->handle, pinfo->handle,
+					MAX_CE_INFO_HANDLE_SIZE)) {
+			memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
+			p->alloc = false;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	ret = copy_from_user(pinfo, argp,
+				sizeof(struct qseecom_ce_info_req));
+	if (ret)
+		return ret;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
+	pinfo->num_ce_pipe_entries = 0;
+	for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	for (i = 0; i < total; i++) {
+		if (p->alloc && !memcmp(p->handle,
+				pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use)
+		goto out;
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+out:
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+/*
+ * Check the TZ whitelist feature; feature versions below 1.0.0 do not
+ * support whitelists.
+ */
+static int qseecom_check_whitelist_feature(void)
+{
+	int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
+
+	return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+}
+
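+/*
+ * Probe sets up the char device, queries the QSEE version over SCM,
+ * creates the ION client, parses DT (CE pipes, bus scaling, clocks)
+ * and, where required, notifies TZ of the secure apps region before
+ * marking the driver ready.
+ */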
+static int qseecom_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	uint32_t feature = 10;
+	struct device *class_dev;
+	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_ce_info_use *pce_info_use = NULL;
+
+	qseecom.qsee_bw_count = 0;
+	qseecom.qsee_perf_client = 0;
+	qseecom.qsee_sfpb_bw_count = 0;
+
+	qseecom.qsee.ce_core_clk = NULL;
+	qseecom.qsee.ce_clk = NULL;
+	qseecom.qsee.ce_core_src_clk = NULL;
+	qseecom.qsee.ce_bus_clk = NULL;
+
+	qseecom.cumulative_mode = 0;
+	qseecom.current_mode = INACTIVE;
+	qseecom.support_bus_scaling = false;
+	qseecom.support_fde = false;
+	qseecom.support_pfe = false;
+
+	qseecom.ce_drv.ce_core_clk = NULL;
+	qseecom.ce_drv.ce_clk = NULL;
+	qseecom.ce_drv.ce_core_src_clk = NULL;
+	qseecom.ce_drv.ce_bus_clk = NULL;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+
+	qseecom.app_block_ref_cnt = 0;
+	init_waitqueue_head(&qseecom.app_block_wq);
+	qseecom.whitelist_support = true;
+
+	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
+			QSEECOM_DEV);
+	if (IS_ERR(class_dev)) {
+		pr_err("class_device_create failed %d\n", rc);
+		rc = -ENOMEM;
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&qseecom.cdev, &qseecom_fops);
+	qseecom.cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+
+	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
+	spin_lock_init(&qseecom.registered_listener_list_lock);
+	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
+	spin_lock_init(&qseecom.registered_app_list_lock);
+	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
+	spin_lock_init(&qseecom.registered_kclient_list_lock);
+	init_waitqueue_head(&qseecom.send_resp_wq);
+	qseecom.send_resp_flag = 0;
+
+	qseecom.qsee_version = QSEEE_VERSION_00;
+	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
+		&resp, sizeof(resp));
+	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
+	if (rc) {
+		pr_err("Failed to get QSEE version info %d\n", rc);
+		goto exit_del_cdev;
+	}
+	qseecom.qsee_version = resp.result;
+	qseecom.qseos_version = QSEOS_VERSION_14;
+	qseecom.commonlib_loaded = false;
+	qseecom.commonlib64_loaded = false;
+	qseecom.pdev = class_dev;
+	/* Create ION msm client */
+	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
+	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
+		pr_err("Ion client cannot be created\n");
+		rc = -ENOMEM;
+		goto exit_del_cdev;
+	}
+
+	/* register client for bus scaling */
+	if (pdev->dev.of_node) {
+		qseecom.pdev->of_node = pdev->dev.of_node;
+		qseecom.support_bus_scaling =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-bus-scaling");
+		rc = qseecom_retrieve_ce_data(pdev);
+		if (rc)
+			goto exit_destroy_ion_client;
+		qseecom.appsbl_qseecom_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,appsbl-qseecom-support");
+		pr_debug("qseecom.appsbl_qseecom_support = 0x%x",
+				qseecom.appsbl_qseecom_support);
+
+		qseecom.commonlib64_loaded =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,commonlib64-loaded-by-uefi");
+		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x",
+				qseecom.commonlib64_loaded);
+		qseecom.fde_key_size =
+			of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,fde-key-size");
+		qseecom.no_clock_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,no-clock-support");
+		if (qseecom.no_clock_support) {
+			pr_info("qseecom clocks handled by other subsystem\n");
+		} else {
+			pr_info("no-clock-support=0x%x\n",
+				qseecom.no_clock_support);
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+					"qcom,qsee-reentrancy-support",
+					&qseecom.qsee_reentrancy_support)) {
+			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
+			qseecom.qsee_reentrancy_support = 0;
+		} else {
+			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
+				qseecom.qsee_reentrancy_support);
+		}
+
+		/*
+		 * The qseecom bus scaling flag can not be enabled when
+		 * crypto clock is not handled by HLOS.
+		 */
+		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
+			pr_err("support_bus_scaling flag can not be enabled.\n");
+			rc = -EINVAL;
+			goto exit_destroy_ion_client;
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&qseecom.ce_opp_freq_hz)) {
+			pr_debug("CE operating frequency is not defined, setting to default 100MHZ\n");
+			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
+		}
+		rc = __qseecom_init_clk(CLK_QSEE);
+		if (rc)
+			goto exit_destroy_ion_client;
+
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde)) {
+			rc = __qseecom_init_clk(CLK_CE_DRV);
+			if (rc) {
+				__qseecom_deinit_clk(CLK_QSEE);
+				goto exit_destroy_ion_client;
+			}
+		} else {
+			struct qseecom_clk *qclk;
+
+			qclk = &qseecom.qsee;
+			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
+			qseecom.ce_drv.ce_clk = qclk->ce_clk;
+			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
+			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
+		}
+
+		qseecom_platform_support = (struct msm_bus_scale_pdata *)
+						msm_bus_cl_get_pdata(pdev);
+		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
+			(!qseecom.is_apps_region_protected &&
+			!qseecom.appsbl_qseecom_support)) {
+			struct resource *resource = NULL;
+			struct qsee_apps_region_info_ireq req;
+			struct qsee_apps_region_info_64bit_ireq req_64bit;
+			struct qseecom_command_scm_resp resp;
+			void *cmd_buf = NULL;
+			size_t cmd_len;
+
+			resource = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "secapp-region");
+			if (resource) {
+				if (qseecom.qsee_version < QSEE_VERSION_40) {
+					req.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req.addr = (uint32_t)resource->start;
+					req.size = resource_size(resource);
+					cmd_buf = (void *)&req;
+					cmd_len = sizeof(struct
+						qsee_apps_region_info_ireq);
+					pr_warn("secure app region addr=0x%x size=0x%x",
+							req.addr, req.size);
+				} else {
+					req_64bit.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req_64bit.addr = resource->start;
+					req_64bit.size = resource_size(
+							resource);
+					cmd_buf = (void *)&req_64bit;
+					cmd_len = sizeof(struct
+					qsee_apps_region_info_64bit_ireq);
+					pr_warn("secure app region addr=0x%llx size=0x%x",
+						req_64bit.addr, req_64bit.size);
+				}
+			} else {
+				pr_err("Fail to get secure app region info\n");
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+			rc = __qseecom_enable_clk(CLK_QSEE);
+			if (rc) {
+				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
+				rc = -EIO;
+				goto exit_deinit_clock;
+			}
+			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len,
+					&resp, sizeof(resp));
+			__qseecom_disable_clk(CLK_QSEE);
+			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
+				pr_err("send secapp reg fail %d resp.res %d\n",
+							rc, resp.result);
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+		}
+		/*
+		 * By default, appsbl only loads cmnlib. If an OEM changes
+		 * appsbl to also load cmnlib64 while the cmnlib64 image is
+		 * not present in non_hlos.bin, please also set
+		 * "qseecom.commonlib64_loaded = true" here.
+		 */
+		if (qseecom.is_apps_region_protected ||
+					qseecom.appsbl_qseecom_support)
+			qseecom.commonlib_loaded = true;
+	} else {
+		qseecom_platform_support = (struct msm_bus_scale_pdata *)
+						pdev->dev.platform_data;
+	}
+	if (qseecom.support_bus_scaling) {
+		init_timer(&(qseecom.bw_scale_down_timer));
+		INIT_WORK(&qseecom.bw_inactive_req_ws,
+					qseecom_bw_inactive_req_work);
+		qseecom.bw_scale_down_timer.function =
+				qseecom_scale_bus_bandwidth_timer_callback;
+	}
+	qseecom.timer_running = false;
+	qseecom.qsee_perf_client = msm_bus_scale_register_client(
+					qseecom_platform_support);
+
+	qseecom.whitelist_support = qseecom_check_whitelist_feature();
+	pr_warn("qseecom.whitelist_support = %d\n",
+				qseecom.whitelist_support);
+
+	if (!qseecom.qsee_perf_client)
+		pr_err("Unable to register bus client\n");
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return 0;
+
+exit_deinit_clock:
+	__qseecom_deinit_clk(CLK_QSEE);
+	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+		(qseecom.support_pfe || qseecom.support_fde))
+		__qseecom_deinit_clk(CLK_CE_DRV);
+exit_destroy_ion_client:
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.fde);
+	}
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.pfe);
+	}
+	ion_client_destroy(qseecom.ion_clnt);
+exit_del_cdev:
+	cdev_del(&qseecom.cdev);
+exit_destroy_device:
+	device_destroy(driver_class, qseecom_device_no);
+exit_destroy_class:
+	class_destroy(driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(qseecom_device_no, 1);
+	return rc;
+}
+
+static int qseecom_remove(struct platform_device *pdev)
+{
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	unsigned long flags = 0;
+	int ret = 0;
+	int i;
+	struct qseecom_ce_pipe_entry *pce_entry;
+	struct qseecom_ce_info_use *pce_info_use;
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+
+	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+								list) {
+		if (!kclient)
+			goto exit_irqrestore;
+
+		/* Break the loop if client handle is NULL */
+		if (!kclient->handle)
+			goto exit_free_kclient;
+
+		if (list_empty(&kclient->list))
+			goto exit_free_kc_handle;
+
+		list_del(&kclient->list);
+		mutex_lock(&app_access_lock);
+		ret = qseecom_unload_app(kclient->handle->dev, false);
+		mutex_unlock(&app_access_lock);
+		if (!ret) {
+			kzfree(kclient->handle->dev);
+			kzfree(kclient->handle);
+			kzfree(kclient);
+		}
+	}
+	/* Normal exit: every registered kernel client has been handled. */
+	goto exit_irqrestore;
+
+exit_free_kc_handle:
+	kzfree(kclient->handle);
+exit_free_kclient:
+	kzfree(kclient);
+exit_irqrestore:
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	if (qseecom.qseos_version > QSEEE_VERSION_00)
+		qseecom_unload_commonlib_image();
+
+	if (qseecom.qsee_perf_client)
+		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
+									0);
+	if (pdev->dev.platform_data != NULL)
+		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
+
+	if (qseecom.support_bus_scaling) {
+		cancel_work_sync(&qseecom.bw_inactive_req_ws);
+		del_timer_sync(&qseecom.bw_scale_down_timer);
+	}
+
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.fde);
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.pfe);
+
+	/* register client for bus scaling */
+	if (pdev->dev.of_node) {
+		__qseecom_deinit_clk(CLK_QSEE);
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde))
+			__qseecom_deinit_clk(CLK_CE_DRV);
+	}
+
+	ion_client_destroy(qseecom.ion_clnt);
+
+	cdev_del(&qseecom.cdev);
+
+	device_destroy(driver_class, qseecom_device_no);
+
+	class_destroy(driver_class);
+
+	unregister_chrdev_region(qseecom_device_no, 1);
+
+	return ret;
+}
+
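+/*
+ * On suspend, drop the bus vote to INACTIVE, gate any CE clocks still
+ * held by clients (clk_access_cnt) and stop the bandwidth scale-down
+ * timer; qseecom_resume() below restores the vote and re-enables the
+ * clocks.
+ */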
+static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
+	if (qseecom.no_clock_support)
+		return 0;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+
+	if (qseecom.current_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, INACTIVE);
+		if (ret)
+			pr_err("Fail to scale down bus\n");
+		else
+			qseecom.current_mode = INACTIVE;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+
+	del_timer_sync(&(qseecom.bw_scale_down_timer));
+	qseecom.timer_running = false;
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	cancel_work_sync(&qseecom.bw_inactive_req_ws);
+
+	return 0;
+}
+
+static int qseecom_resume(struct platform_device *pdev)
+{
+	int mode = 0;
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qseecom.no_clock_support)
+		goto exit;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+	if (qseecom.cumulative_mode >= HIGH)
+		mode = HIGH;
+	else
+		mode = qseecom.cumulative_mode;
+
+	if (qseecom.cumulative_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, mode);
+		if (ret)
+			pr_err("Fail to scale up bus to %d\n", mode);
+		else
+			qseecom.current_mode = mode;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_core_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_core_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE core clk\n");
+				qclk->clk_access_cnt = 0;
+				goto err;
+			}
+		}
+		if (qclk->ce_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE iface clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_clk_err;
+			}
+		}
+		if (qclk->ce_bus_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_bus_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE bus clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_bus_clk_err;
+			}
+		}
+	}
+
+	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
+		qseecom.bw_scale_down_timer.expires = jiffies +
+			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		mod_timer(&(qseecom.bw_scale_down_timer),
+				qseecom.bw_scale_down_timer.expires);
+		qseecom.timer_running = true;
+	}
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	goto exit;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	ret = -EIO;
+exit:
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return ret;
+}
+
+static const struct of_device_id qseecom_match[] = {
+	{
+		.compatible = "qcom,qseecom",
+	},
+	{}
+};
+
+static struct platform_driver qseecom_plat_driver = {
+	.probe = qseecom_probe,
+	.remove = qseecom_remove,
+	.suspend = qseecom_suspend,
+	.resume = qseecom_resume,
+	.driver = {
+		.name = "qseecom",
+		.owner = THIS_MODULE,
+		.of_match_table = qseecom_match,
+	},
+};
+
+static int __init qseecom_init(void)
+{
+	return platform_driver_register(&qseecom_plat_driver);
+}
+
+static void __exit qseecom_exit(void)
+{
+	platform_driver_unregister(&qseecom_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
+
+module_init(qseecom_init);
+module_exit(qseecom_exit);
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
new file mode 100644
index 0000000..5ca5839
--- /dev/null
+++ b/drivers/misc/qseecom_kernel.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOM_KERNEL_H_
+#define __QSEECOM_KERNEL_H_
+
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+
+#define QSEECOM_ALIGN_SIZE	0x40
+#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x)	\
+	(((x) + QSEECOM_ALIGN_MASK) & (~QSEECOM_ALIGN_MASK))
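+/*
+ * Example: QSEECOM_ALIGN(0x41) == 0x80; buffer lengths are rounded up
+ * to a multiple of QSEECOM_ALIGN_SIZE (0x40 bytes).
+ */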
+
+/*
+ * struct qseecom_handle -
+ *      Handle to the qseecom device for kernel clients
+ * @sbuf - shared buffer pointer
+ * @sbbuf_len - shared buffer size
+ */
+struct qseecom_handle {
+	void *dev; /* in/out */
+	unsigned char *sbuf; /* in/out */
+	uint32_t sbuf_len; /* in/out */
+};
+
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size);
+int qseecom_shutdown_app(struct qseecom_handle **handle);
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc);
+
+#endif /* __QSEECOM_KERNEL_H_ */
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 387ae1c..a8b430f 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -29,6 +29,8 @@
 
 #include "sdhci-pltfm.h"
 
+#define SDMMC_MC1R	0x204
+#define		SDMMC_MC1R_DDR		BIT(3)
 #define SDMMC_CACR	0x230
 #define		SDMMC_CACR_CAPWREN	BIT(0)
 #define		SDMMC_CACR_KEY		(0x46 << 8)
@@ -103,11 +105,18 @@
 	sdhci_set_power_noreg(host, mode, vdd);
 }
 
+static void sdhci_at91_set_uhs_signaling(struct sdhci_host *host,
+					 unsigned int timing)
+{
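+	/*
+	 * The SAMA5D2 controller needs the DDR bit in SDMMC_MC1R set
+	 * before switching to eMMC DDR52 timing; the generic helper then
+	 * programs the usual UHS mode bits.
+	 */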
+	if (timing == MMC_TIMING_MMC_DDR52)
+		sdhci_writeb(host, SDMMC_MC1R_DDR, SDMMC_MC1R);
+	sdhci_set_uhs_signaling(host, timing);
+}
+
 static const struct sdhci_ops sdhci_at91_sama5d2_ops = {
 	.set_clock		= sdhci_at91_set_clock,
 	.set_bus_width		= sdhci_set_bus_width,
 	.reset			= sdhci_reset,
-	.set_uhs_signaling	= sdhci_set_uhs_signaling,
+	.set_uhs_signaling	= sdhci_at91_set_uhs_signaling,
 	.set_power		= sdhci_at91_set_power,
 };
 
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index b3e0c7c..2e7d4d4 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2119,6 +2119,9 @@
 	struct sdhci_host *host = mmc_priv(mmc);
 	unsigned long flags;
 
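+	/*
+	 * Keep the device runtime-resumed while the SDIO IRQ is enabled;
+	 * the pm_runtime_put_noidle() below pairs with this once the IRQ
+	 * is disabled again.
+	 */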
+	if (enable)
+		pm_runtime_get_noresume(host->mmc->parent);
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (enable)
 		host->flags |= SDHCI_SDIO_IRQ_ENABLED;
@@ -2127,6 +2130,9 @@
 
 	sdhci_enable_sdio_irq_nolock(host, enable);
 	spin_unlock_irqrestore(&host->lock, flags);
+
+	if (!enable)
+		pm_runtime_put_noidle(host->mmc->parent);
 }
 
 static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index da10b48..bde769b 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -2057,9 +2057,9 @@
 		 * Revalidating a dead namespace sets capacity to 0. This will
 		 * end buffered writers dirtying pages that can't be synced.
 		 */
-		if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags))
-			revalidate_disk(ns->disk);
-
+		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+			continue;
+		revalidate_disk(ns->disk);
 		blk_set_queue_dying(ns->queue);
 		blk_mq_abort_requeue_list(ns->queue);
 		blk_mq_start_stopped_hw_queues(ns->queue, true);
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 5e52034..8a9c186 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1983,8 +1983,10 @@
 
 	pci_set_drvdata(pdev, NULL);
 
-	if (!pci_device_is_present(pdev))
+	if (!pci_device_is_present(pdev)) {
 		nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD);
+		nvme_dev_disable(dev, false);
+	}
 
 	flush_work(&dev->reset_work);
 	nvme_uninit_ctrl(&dev->ctrl);
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index fdbbe41..1e45c73 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -24,6 +24,7 @@
 #include <linux/kernel.h>
 #include <linux/of_pci.h>
 #include <linux/pci.h>
+#include <linux/iommu.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
 #include <linux/regulator/rpm-smd-regulator.h>
@@ -480,6 +481,11 @@
 	MSM_PCIE_LINK_DISABLED
 };
 
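+/*
+ * Bits for the "qcom,boot-option" DT property: suppress enumeration at
+ * probe time and/or on endpoint WAKE assertion.
+ */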
+enum msm_pcie_boot_option {
+	MSM_PCIE_NO_PROBE_ENUMERATION = BIT(0),
+	MSM_PCIE_NO_WAKE_ENUMERATION = BIT(1)
+};
+
 /* gpio info structure */
 struct msm_pcie_gpio_info_t {
 	char	*name;
@@ -628,7 +634,7 @@
 	uint32_t			perst_delay_us_max;
 	uint32_t			tlp_rd_size;
 	bool				linkdown_panic;
-	bool				 ep_wakeirq;
+	uint32_t			boot_option;
 
 	uint32_t			   rc_idx;
 	uint32_t			phy_ver;
@@ -1949,8 +1955,8 @@
 		dev->aer_enable ? "" : "not");
 	PCIE_DBG_FS(dev, "ext_ref_clk is %d\n",
 		dev->ext_ref_clk);
-	PCIE_DBG_FS(dev, "ep_wakeirq is %d\n",
-		dev->ep_wakeirq);
+	PCIE_DBG_FS(dev, "boot_option is 0x%x\n",
+		dev->boot_option);
 	PCIE_DBG_FS(dev, "phy_ver is %d\n",
 		dev->phy_ver);
 	PCIE_DBG_FS(dev, "drv_ready is %d\n",
@@ -2417,8 +2423,16 @@
 			dev->res[base_sel - 1].base,
 			wr_offset, wr_mask, wr_value);
 
-		msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
-			wr_offset, wr_mask, wr_value);
+		base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+		if (wr_offset > base_sel_size - 4 ||
+			msm_pcie_check_align(dev, wr_offset))
+			PCIE_DBG_FS(dev,
+				"PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+				dev->rc_idx, wr_offset, base_sel_size - 4);
+		else
+			msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+				wr_offset, wr_mask, wr_value);
 
 		break;
 	case 13: /* dump all registers of base_sel */
@@ -2505,6 +2519,48 @@
 }
 EXPORT_SYMBOL(msm_pcie_debug_info);
 
+#ifdef CONFIG_SYSFS
+static ssize_t msm_pcie_enumerate_store(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t count)
+{
+	struct msm_pcie_dev_t *pcie_dev = (struct msm_pcie_dev_t *)
+						dev_get_drvdata(dev);
+
+	if (pcie_dev)
+		msm_pcie_enumerate(pcie_dev->rc_idx);
+
+	return count;
+}
+
+static DEVICE_ATTR(enumerate, 0200, NULL, msm_pcie_enumerate_store);
+
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+	int ret;
+
+	ret = device_create_file(&dev->pdev->dev, &dev_attr_enumerate);
+	if (ret)
+		PCIE_DBG_FS(dev,
+			"RC%d: failed to create sysfs enumerate node\n",
+			dev->rc_idx);
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+	if (dev->pdev)
+		device_remove_file(&dev->pdev->dev, &dev_attr_enumerate);
+}
+#else
+static void msm_pcie_sysfs_init(struct msm_pcie_dev_t *dev)
+{
+}
+
+static void msm_pcie_sysfs_exit(struct msm_pcie_dev_t *dev)
+{
+}
+#endif
+
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *dent_msm_pcie;
 static struct dentry *dfile_rc_sel;
@@ -2514,7 +2570,7 @@
 static struct dentry *dfile_wr_offset;
 static struct dentry *dfile_wr_mask;
 static struct dentry *dfile_wr_value;
-static struct dentry *dfile_ep_wakeirq;
+static struct dentry *dfile_boot_option;
 static struct dentry *dfile_aer_enable;
 static struct dentry *dfile_corr_counter_limit;
 
@@ -2528,13 +2584,14 @@
 	char str[MAX_MSG_LEN];
 	unsigned int testcase = 0;
 	int i;
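+	/* Clamp the copy to what the user wrote and to what fits in str. */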
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
 	if (ret)
 		return -EFAULT;
 
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
 		testcase = (testcase * 10) + (str[i] - '0');
 
 	if (!rc_sel)
@@ -2563,13 +2620,14 @@
 	char str[MAX_MSG_LEN];
 	int i;
 	u32 new_rc_sel = 0;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
 	if (ret)
 		return -EFAULT;
 
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
 		new_rc_sel = (new_rc_sel * 10) + (str[i] - '0');
 
 	if ((!new_rc_sel) || (new_rc_sel > rc_sel_max)) {
@@ -2606,13 +2664,14 @@
 	int i;
 	u32 new_base_sel = 0;
 	char *base_sel_name;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
 	if (ret)
 		return -EFAULT;
 
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
 		new_base_sel = (new_base_sel * 10) + (str[i] - '0');
 
 	if (!new_base_sel || new_base_sel > 5) {
@@ -2707,14 +2766,15 @@
 	unsigned long ret;
 	char str[MAX_MSG_LEN];
 	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
 	if (ret)
 		return -EFAULT;
 
 	wr_offset = 0;
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
 		wr_offset = (wr_offset * 10) + (str[i] - '0');
 
 	pr_alert("PCIe: wr_offset is now 0x%x\n", wr_offset);
@@ -2733,14 +2793,15 @@
 	unsigned long ret;
 	char str[MAX_MSG_LEN];
 	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
 	if (ret)
 		return -EFAULT;
 
 	wr_mask = 0;
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
 		wr_mask = (wr_mask * 10) + (str[i] - '0');
 
 	pr_alert("PCIe: wr_mask is now 0x%x\n", wr_mask);
@@ -2758,14 +2819,15 @@
 	unsigned long ret;
 	char str[MAX_MSG_LEN];
 	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
 	if (ret)
 		return -EFAULT;
 
 	wr_value = 0;
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
 		wr_value = (wr_value * 10) + (str[i] - '0');
 
 	pr_alert("PCIe: wr_value is now 0x%x\n", wr_value);
@@ -2777,13 +2839,13 @@
 	.write = msm_pcie_set_wr_value,
 };
 
-static ssize_t msm_pcie_set_ep_wakeirq(struct file *file,
+static ssize_t msm_pcie_set_boot_option(struct file *file,
 				const char __user *buf,
 				size_t count, loff_t *ppos)
 {
 	unsigned long ret;
 	char str[MAX_MSG_LEN];
-	u32 new_ep_wakeirq = 0;
+	u32 new_boot_option = 0;
 	int i;
 
 	memset(str, 0, sizeof(str));
@@ -2792,33 +2854,33 @@
 		return -EFAULT;
 
 	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
-		new_ep_wakeirq = (new_ep_wakeirq * 10) + (str[i] - '0');
+		new_boot_option = (new_boot_option * 10) + (str[i] - '0');
 
-	if (new_ep_wakeirq <= 1) {
+	if (new_boot_option <= (MSM_PCIE_NO_PROBE_ENUMERATION |
+				MSM_PCIE_NO_WAKE_ENUMERATION)) {
 		for (i = 0; i < MAX_RC_NUM; i++) {
 			if (!rc_sel) {
-				msm_pcie_dev[0].ep_wakeirq = new_ep_wakeirq;
+				msm_pcie_dev[0].boot_option = new_boot_option;
 				PCIE_DBG_FS(&msm_pcie_dev[0],
-					"PCIe: RC0: ep_wakeirq is now %d\n",
-					msm_pcie_dev[0].ep_wakeirq);
+					"PCIe: RC0: boot_option is now 0x%x\n",
+					msm_pcie_dev[0].boot_option);
 				break;
 			} else if (rc_sel & (1 << i)) {
-				msm_pcie_dev[i].ep_wakeirq = new_ep_wakeirq;
+				msm_pcie_dev[i].boot_option = new_boot_option;
 				PCIE_DBG_FS(&msm_pcie_dev[i],
-					"PCIe: RC%d: ep_wakeirq is now %d\n",
-					i, msm_pcie_dev[i].ep_wakeirq);
+					"PCIe: RC%d: boot_option is now 0x%x\n",
+					i, msm_pcie_dev[i].boot_option);
 			}
 		}
 	} else {
-		pr_err("PCIe: Invalid input for ep_wakeirq: %d. Please enter 0 or 1.\n",
-			new_ep_wakeirq);
+		pr_err("PCIe: Invalid input for boot_option: 0x%x.\n",
+			new_boot_option);
 	}
 
 	return count;
 }
 
-const struct file_operations msm_pcie_ep_wakeirq_ops = {
-	.write = msm_pcie_set_ep_wakeirq,
+const struct file_operations msm_pcie_boot_option_ops = {
+	.write = msm_pcie_set_boot_option,
 };
 
 static ssize_t msm_pcie_set_aer_enable(struct file *file,
@@ -2884,14 +2946,15 @@
 	unsigned long ret;
 	char str[MAX_MSG_LEN];
 	int i;
+	u32 size = sizeof(str) < count ? sizeof(str) : count;
 
-	memset(str, 0, sizeof(str));
-	ret = copy_from_user(str, buf, sizeof(str));
+	memset(str, 0, size);
+	ret = copy_from_user(str, buf, size);
 	if (ret)
 		return -EFAULT;
 
 	corr_counter_limit = 0;
-	for (i = 0; i < sizeof(str) && (str[i] >= '0') && (str[i] <= '9'); ++i)
+	for (i = 0; i < size && (str[i] >= '0') && (str[i] <= '9'); ++i)
 		corr_counter_limit = (corr_counter_limit * 10) + (str[i] - '0');
 
 	pr_info("PCIe: corr_counter_limit is now %lu\n", corr_counter_limit);
@@ -2970,12 +3033,12 @@
 		goto wr_value_error;
 	}
 
-	dfile_ep_wakeirq = debugfs_create_file("ep_wakeirq", 0664,
+	dfile_boot_option = debugfs_create_file("boot_option", 0664,
 					dent_msm_pcie, 0,
-					&msm_pcie_ep_wakeirq_ops);
-	if (!dfile_ep_wakeirq || IS_ERR(dfile_ep_wakeirq)) {
-		pr_err("PCIe: fail to create the file for debug_fs ep_wakeirq.\n");
-		goto ep_wakeirq_error;
+					&msm_pcie_boot_option_ops);
+	if (!dfile_boot_option || IS_ERR(dfile_boot_option)) {
+		pr_err("PCIe: fail to create the file for debug_fs boot_option.\n");
+		goto boot_option_error;
 	}
 
 	dfile_aer_enable = debugfs_create_file("aer_enable", 0664,
@@ -2998,8 +3061,8 @@
 corr_counter_limit_error:
 	debugfs_remove(dfile_aer_enable);
 aer_enable_error:
-	debugfs_remove(dfile_ep_wakeirq);
-ep_wakeirq_error:
+	debugfs_remove(dfile_boot_option);
+boot_option_error:
 	debugfs_remove(dfile_wr_value);
 wr_value_error:
 	debugfs_remove(dfile_wr_mask);
@@ -3026,7 +3089,7 @@
 	debugfs_remove(dfile_wr_offset);
 	debugfs_remove(dfile_wr_mask);
 	debugfs_remove(dfile_wr_value);
-	debugfs_remove(dfile_ep_wakeirq);
+	debugfs_remove(dfile_boot_option);
 	debugfs_remove(dfile_aer_enable);
 	debugfs_remove(dfile_corr_counter_limit);
 }
@@ -3257,7 +3320,7 @@
 
 	word_offset = where & ~0x3;
 	byte_offset = where & 0x3;
-	mask = (~0 >> (8 * (4 - size))) << (8 * byte_offset);
+	mask = ((u32)~0 >> (8 * (4 - size))) << (8 * byte_offset);
 
 	if (rc || !dev->enumerated) {
 		config_base = rc ? dev->dm_core : dev->conf;
@@ -3292,12 +3355,17 @@
 		writel_relaxed(wr_val, config_base + word_offset);
 		wmb(); /* ensure config data is written to hardware register */
 
-		if (rd_val == PCIE_LINK_DOWN)
-			PCIE_ERR(dev,
-				"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
-				rc_idx, bus->number, devfn, where, size);
-		else if (dev->shadow_en)
-			msm_pcie_save_shadow(dev, word_offset, wr_val, bdf, rc);
+		if (dev->shadow_en) {
+			if (rd_val == PCIE_LINK_DOWN &&
+				(readl_relaxed(config_base) == PCIE_LINK_DOWN))
+				PCIE_ERR(dev,
+					"Read of RC%d %d:0x%02x + 0x%04x[%d] is all FFs\n",
+					rc_idx, bus->number, devfn,
+					where, size);
+			else
+				msm_pcie_save_shadow(dev, word_offset, wr_val,
+					bdf, rc);
+		}
 
 		PCIE_DBG3(dev,
 			"RC%d %d:0x%02x + 0x%04x[%d] <- 0x%08x; rd 0x%08x val 0x%08x\n",
@@ -4591,6 +4659,8 @@
 	do {
 		usleep_range(LINK_UP_TIMEOUT_US_MIN, LINK_UP_TIMEOUT_US_MAX);
 		val =  readl_relaxed(dev->elbi + PCIE20_ELBI_SYS_STTS);
+		PCIE_DBG(dev, "PCIe RC%d: LTSSM_STATE:0x%x\n",
+			dev->rc_idx, (val >> 12) & 0x3f);
 	} while ((!(val & XMLH_LINK_UP) ||
 		!msm_pcie_confirm_linkup(dev, false, false, NULL))
 		&& (link_check_count++ < LINK_UP_CHECK_MAX_COUNT));
@@ -5358,14 +5428,10 @@
 	PCIE_DBG2(dev, "PCIe WAKE is asserted by Endpoint of RC%d\n",
 		dev->rc_idx);
 
-	if (!dev->enumerated) {
-		PCIE_DBG(dev, "Start enumeating RC%d\n", dev->rc_idx);
-		if (dev->ep_wakeirq)
-			schedule_work(&dev->handle_wake_work);
-		else
-			PCIE_DBG(dev,
-				"wake irq is received but ep_wakeirq is not supported for RC%d.\n",
-				dev->rc_idx);
+	if (!dev->enumerated && !(dev->boot_option &
+		MSM_PCIE_NO_WAKE_ENUMERATION)) {
+		PCIE_DBG(dev, "Start enumerating RC%d\n", dev->rc_idx);
+		schedule_work(&dev->handle_wake_work);
 	} else {
 		PCIE_DBG2(dev, "Wake up RC%d\n", dev->rc_idx);
 		__pm_stay_awake(&dev->ws);
@@ -5511,7 +5577,7 @@
 				handle_aer_irq(irq, data);
 				break;
 			default:
-				PCIE_ERR(dev,
+				PCIE_DUMP(dev,
 					"PCIe: RC%d: Unexpected event %d is caught!\n",
 					dev->rc_idx, i);
 			}
@@ -5523,35 +5589,84 @@
 	return IRQ_HANDLED;
 }
 
-void msm_pcie_destroy_irq(unsigned int irq, struct msm_pcie_dev_t *pcie_dev)
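+/*
+ * Undo the stage-1 IOMMU mapping created for the QGIC doorbell in
+ * msm_pcie_map_qgic_addr(); called when the last MSI vector of a block
+ * is torn down.
+ */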
+static void msm_pcie_unmap_qgic_addr(struct msm_pcie_dev_t *dev,
+					struct pci_dev *pdev)
 {
-	int pos, i;
-	struct msm_pcie_dev_t *dev;
+	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+	int bypass_en = 0;
 
-	if (pcie_dev)
-		dev = pcie_dev;
-	else
-		dev = irq_get_chip_data(irq);
-
-	if (!dev) {
-		pr_err("PCIe: device is null. IRQ:%d\n", irq);
+	if (!domain) {
+		PCIE_DBG(dev,
+			"PCIe: RC%d: client does not have an iommu domain\n",
+			dev->rc_idx);
 		return;
 	}
 
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+	if (!bypass_en) {
+		int ret;
+		phys_addr_t pcie_base_addr =
+			dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+		dma_addr_t iova = rounddown(pcie_base_addr, PAGE_SIZE);
+
+		ret = iommu_unmap(domain, iova, PAGE_SIZE);
+		if (ret != PAGE_SIZE)
+			PCIE_ERR(dev,
+				"PCIe: RC%d: failed to unmap QGIC address. ret = %d\n",
+				dev->rc_idx, ret);
+	}
+}
+
+void msm_pcie_destroy_irq(unsigned int irq)
+{
+	int pos;
+	struct pci_dev *pdev = irq_get_chip_data(irq);
+	struct msi_desc *entry = irq_get_msi_desc(irq);
+	struct msi_desc *firstentry;
+	struct msm_pcie_dev_t *dev;
+	u32 nvec;
+	int firstirq;
+
+	if (!pdev) {
+		pr_err("PCIe: pci device is null. IRQ:%d\n", irq);
+		return;
+	}
+
+	dev = PCIE_BUS_PRIV_DATA(pdev->bus);
+	if (!dev) {
+		pr_err("PCIe: could not find RC. IRQ:%d\n", irq);
+		return;
+	}
+
+	if (!entry) {
+		PCIE_ERR(dev, "PCIe: RC%d: msi desc is null. IRQ:%d\n",
+			dev->rc_idx, irq);
+		return;
+	}
+
+	firstentry = first_pci_msi_entry(pdev);
+	if (!firstentry) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d: firstentry msi desc is null. IRQ:%d\n",
+			dev->rc_idx, irq);
+		return;
+	}
+
+	firstirq = firstentry->irq;
+	nvec = (1 << entry->msi_attrib.multiple);
+
 	if (dev->msi_gicm_addr) {
 		PCIE_DBG(dev, "destroy QGIC based irq %d\n", irq);
 
-		for (i = 0; i < MSM_PCIE_MAX_MSI; i++)
-			if (irq == dev->msi[i].num)
-				break;
-		if (i == MSM_PCIE_MAX_MSI) {
+		if (irq < firstirq || irq > firstirq + nvec - 1) {
 			PCIE_ERR(dev,
 				"Could not find irq: %d in RC%d MSI table\n",
 				irq, dev->rc_idx);
 			return;
 		}
-
-		pos = i;
+		if (irq == firstirq + nvec - 1)
+			msm_pcie_unmap_qgic_addr(dev, pdev);
+		pos = irq - firstirq;
 	} else {
 		PCIE_DBG(dev, "destroy default MSI irq %d\n", irq);
 		pos = irq - irq_find_mapping(dev->irq_domain, 0);
@@ -5570,7 +5685,7 @@
 void arch_teardown_msi_irq(unsigned int irq)
 {
 	PCIE_GEN_DBG("irq %d deallocated\n", irq);
-	msm_pcie_destroy_irq(irq, NULL);
+	msm_pcie_destroy_irq(irq);
 }
 
 void arch_teardown_msi_irqs(struct pci_dev *dev)
@@ -5590,7 +5705,7 @@
 			continue;
 		nvec = 1 << entry->msi_attrib.multiple;
 		for (i = 0; i < nvec; i++)
-			msm_pcie_destroy_irq(entry->irq + i, pcie_dev);
+			arch_teardown_msi_irq(entry->irq + i);
 	}
 }
 
@@ -5651,6 +5766,7 @@
 
 	PCIE_DBG(dev, "irq %d allocated\n", irq);
 
+	irq_set_chip_data(irq, pdev);
 	irq_set_msi_desc(irq, desc);
 
 	/* write msi vector and data */
@@ -5698,10 +5814,64 @@
 	return irq;
 }
 
+static int msm_pcie_map_qgic_addr(struct msm_pcie_dev_t *dev,
+					struct pci_dev *pdev,
+					struct msi_msg *msg)
+{
+	struct iommu_domain *domain = iommu_get_domain_for_dev(&pdev->dev);
+	int ret, bypass_en = 0;
+	dma_addr_t iova;
+	phys_addr_t pcie_base_addr, gicm_db_offset;
+
+	msg->address_hi = 0;
+	msg->address_lo = dev->msi_gicm_addr;
+
+	if (!domain) {
+		PCIE_DBG(dev,
+			"PCIe: RC%d: client does not have an iommu domain\n",
+			dev->rc_idx);
+		return 0;
+	}
+
+	iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass_en);
+
+	PCIE_DBG(dev,
+		"PCIe: RC%d: Stage 1 is %s for endpoint: %04x:%02x\n",
+		dev->rc_idx, bypass_en ? "bypassed" : "enabled",
+		pdev->bus->number, pdev->devfn);
+
+	if (bypass_en)
+		return 0;
+
+	gicm_db_offset = dev->msi_gicm_addr -
+		rounddown(dev->msi_gicm_addr, PAGE_SIZE);
+	/*
+	 * Use the PCIe DBI address as the IOVA, since clients cannot
+	 * use this address for their own IOMMU mappings. This prevents
+	 * any conflict between the PCIe host's mapping and the
+	 * client's.
+	 */
+	pcie_base_addr = dev->res[MSM_PCIE_RES_DM_CORE].resource->start;
+	iova = rounddown(pcie_base_addr, PAGE_SIZE);
+
+	ret = iommu_map(domain, iova, rounddown(dev->msi_gicm_addr, PAGE_SIZE),
+			PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
+	if (ret < 0) {
+		PCIE_ERR(dev,
+			"PCIe: RC%d: ret: %d: Could not do iommu map for QGIC address\n",
+			dev->rc_idx, ret);
+		return -ENOMEM;
+	}
+
+	msg->address_lo = iova + gicm_db_offset;
+
+	return 0;
+}
+
 static int arch_setup_msi_irq_qgic(struct pci_dev *pdev,
 		struct msi_desc *desc, int nvec)
 {
-	int irq, index, firstirq = 0;
+	int irq, index, ret, firstirq = 0;
 	struct msi_msg msg;
 	struct msm_pcie_dev_t *dev = PCIE_BUS_PRIV_DATA(pdev->bus);
 
@@ -5718,12 +5888,16 @@
 			firstirq = irq;
 
 		irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
+		irq_set_chip_data(irq, pdev);
 	}
 
 	/* write msi vector and data */
 	irq_set_msi_desc(firstirq, desc);
-	msg.address_hi = 0;
-	msg.address_lo = dev->msi_gicm_addr;
+
+	ret = msm_pcie_map_qgic_addr(dev, pdev, &msg);
+	if (ret)
+		return ret;
+
 	msg.data = dev->msi_gicm_base + (firstirq - dev->msi[0].num);
 	write_msi_msg(firstirq, &msg);
 
@@ -5795,7 +5969,6 @@
 	   irq_hw_number_t hwirq)
 {
 	irq_set_chip_and_handler (irq, &pcie_msi_chip, handle_simple_irq);
-	irq_set_chip_data(irq, domain->host_data);
 	return 0;
 }
 
@@ -6041,12 +6214,12 @@
 			msm_pcie_dev[rc_idx].rc_idx,
 			msm_pcie_dev[rc_idx].smmu_sid_base);
 
-	msm_pcie_dev[rc_idx].ep_wakeirq =
-		of_property_read_bool((&pdev->dev)->of_node,
-				"qcom,ep-wakeirq");
+	msm_pcie_dev[rc_idx].boot_option = 0;
+	ret = of_property_read_u32((&pdev->dev)->of_node, "qcom,boot-option",
+				&msm_pcie_dev[rc_idx].boot_option);
 	PCIE_DBG(&msm_pcie_dev[rc_idx],
-		"PCIe: EP of RC%d does %s assert wake when it is up.\n",
-		rc_idx, msm_pcie_dev[rc_idx].ep_wakeirq ? "" : "not");
+		"PCIe: RC%d boot option is 0x%x.\n",
+		rc_idx, msm_pcie_dev[rc_idx].boot_option);
 
 	msm_pcie_dev[rc_idx].phy_ver = 1;
 	ret = of_property_read_u32((&pdev->dev)->of_node,
@@ -6271,6 +6444,9 @@
 		msm_pcie_dev[rc_idx].pcidev_table[i].registered = true;
 	}
 
+	dev_set_drvdata(&msm_pcie_dev[rc_idx].pdev->dev, &msm_pcie_dev[rc_idx]);
+	msm_pcie_sysfs_init(&msm_pcie_dev[rc_idx]);
+
 	ret = msm_pcie_get_resources(&msm_pcie_dev[rc_idx],
 				msm_pcie_dev[rc_idx].pdev);
 
@@ -6322,9 +6498,10 @@
 
 	msm_pcie_dev[rc_idx].drv_ready = true;
 
-	if (msm_pcie_dev[rc_idx].ep_wakeirq) {
+	if (msm_pcie_dev[rc_idx].boot_option &
+			MSM_PCIE_NO_PROBE_ENUMERATION) {
 		PCIE_DBG(&msm_pcie_dev[rc_idx],
-			"PCIe: RC%d will be enumerated upon WAKE signal from Endpoint.\n",
+			"PCIe: RC%d will be enumerated by client or endpoint.\n",
 			rc_idx);
 		mutex_unlock(&pcie_drv.drv_lock);
 		return 0;
@@ -6484,11 +6661,16 @@
 
 static void __exit pcie_exit(void)
 {
+	int i;
+
 	PCIE_GEN_DBG("pcie:%s.\n", __func__);
 
 	platform_driver_unregister(&msm_pcie_driver);
 
 	msm_pcie_debugfs_exit();
+
+	for (i = 0; i < MAX_RC_NUM; i++)
+		msm_pcie_sysfs_exit(&msm_pcie_dev[i]);
 }
 
 subsys_initcall_sync(pcie_init);
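
The QGIC doorbell mapping above deliberately reuses the RC's own DBI base as the IOVA, so the mapped page can never collide with an IOVA a client chose for itself. A minimal sketch of the address arithmetic, assuming 4K pages and using only names that appear in the patch:

	/* Sketch only: condenses msm_pcie_map_qgic_addr() above. */
	phys_addr_t db_pa   = dev->msi_gicm_addr;          /* doorbell PA */
	phys_addr_t db_page = rounddown(db_pa, PAGE_SIZE); /* mapped page */
	phys_addr_t db_off  = db_pa - db_page;             /* page offset */
	dma_addr_t iova = rounddown(
		dev->res[MSM_PCIE_RES_DM_CORE].resource->start, PAGE_SIZE);

	/* one page suffices: the doorbell register lives inside it */
	iommu_map(domain, iova, db_page, PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
	msg->address_lo = iova + db_off;   /* address the EP will write */

The matching iommu_unmap() in msm_pcie_unmap_qgic_addr() tears down exactly this one page, keyed off the same rounded-down DBI base, and is triggered when the last IRQ of the MSI block is destroyed.
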
diff --git a/drivers/pci/host/pcie-iproc-bcma.c b/drivers/pci/host/pcie-iproc-bcma.c
index 8ce0890..46ca8ed 100644
--- a/drivers/pci/host/pcie-iproc-bcma.c
+++ b/drivers/pci/host/pcie-iproc-bcma.c
@@ -44,8 +44,7 @@
 {
 	struct device *dev = &bdev->dev;
 	struct iproc_pcie *pcie;
-	LIST_HEAD(res);
-	struct resource res_mem;
+	LIST_HEAD(resources);
 	int ret;
 
 	pcie = devm_kzalloc(dev, sizeof(*pcie), GFP_KERNEL);
@@ -62,22 +61,23 @@
 
 	pcie->base_addr = bdev->addr;
 
-	res_mem.start = bdev->addr_s[0];
-	res_mem.end = bdev->addr_s[0] + SZ_128M - 1;
-	res_mem.name = "PCIe MEM space";
-	res_mem.flags = IORESOURCE_MEM;
-	pci_add_resource(&res, &res_mem);
+	pcie->mem.start = bdev->addr_s[0];
+	pcie->mem.end = bdev->addr_s[0] + SZ_128M - 1;
+	pcie->mem.name = "PCIe MEM space";
+	pcie->mem.flags = IORESOURCE_MEM;
+	pci_add_resource(&resources, &pcie->mem);
 
 	pcie->map_irq = iproc_pcie_bcma_map_irq;
 
-	ret = iproc_pcie_setup(pcie, &res);
-	if (ret)
+	ret = iproc_pcie_setup(pcie, &resources);
+	if (ret) {
 		dev_err(dev, "PCIe controller setup failed\n");
-
-	pci_free_resource_list(&res);
+		pci_free_resource_list(&resources);
+		return ret;
+	}
 
 	bcma_set_drvdata(bdev, pcie);
-	return ret;
+	return 0;
 }
 
 static void iproc_pcie_bcma_remove(struct bcma_device *bdev)
diff --git a/drivers/pci/host/pcie-iproc-platform.c b/drivers/pci/host/pcie-iproc-platform.c
index a3de087..7dcaddc 100644
--- a/drivers/pci/host/pcie-iproc-platform.c
+++ b/drivers/pci/host/pcie-iproc-platform.c
@@ -46,7 +46,7 @@
 	struct device_node *np = dev->of_node;
 	struct resource reg;
 	resource_size_t iobase = 0;
-	LIST_HEAD(res);
+	LIST_HEAD(resources);
 	int ret;
 
 	of_id = of_match_device(iproc_pcie_of_match_table, dev);
@@ -108,23 +108,24 @@
 		pcie->phy = NULL;
 	}
 
-	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase);
+	ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &resources,
+					       &iobase);
 	if (ret) {
-		dev_err(dev,
-			"unable to get PCI host bridge resources\n");
+		dev_err(dev, "unable to get PCI host bridge resources\n");
 		return ret;
 	}
 
 	pcie->map_irq = of_irq_parse_and_map_pci;
 
-	ret = iproc_pcie_setup(pcie, &res);
-	if (ret)
+	ret = iproc_pcie_setup(pcie, &resources);
+	if (ret) {
 		dev_err(dev, "PCIe controller setup failed\n");
-
-	pci_free_resource_list(&res);
+		pci_free_resource_list(&resources);
+		return ret;
+	}
 
 	platform_set_drvdata(pdev, pcie);
-	return ret;
+	return 0;
 }
 
 static int iproc_pcie_pltfm_remove(struct platform_device *pdev)
diff --git a/drivers/pci/host/pcie-iproc.h b/drivers/pci/host/pcie-iproc.h
index e84d93c..fa42267 100644
--- a/drivers/pci/host/pcie-iproc.h
+++ b/drivers/pci/host/pcie-iproc.h
@@ -68,6 +68,7 @@
 #ifdef CONFIG_ARM
 	struct pci_sys_data sysdata;
 #endif
+	struct resource mem;
 	struct pci_bus *root_bus;
 	struct phy *phy;
 	int (*map_irq)(const struct pci_dev *, u8, u8);
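
Taken together, the three iproc hunks above fix a use-after-scope: the old code placed res_mem on the probe stack, while pci_add_resource() stored its address in the bridge's resource list, which the PCI core keeps dereferencing long after probe returns. Hoisting it into the devm-allocated struct iproc_pcie gives it driver lifetime. The pattern, reduced to its essentials:

	/* before: resource dies when probe() returns, list still points at it */
	struct resource res_mem;                 /* on-stack */
	pci_add_resource(&resources, &res_mem);

	/* after: resource lives as long as the devm-allocated pcie struct */
	pci_add_resource(&resources, &pcie->mem);

For the same reason, the success path no longer calls pci_free_resource_list(): the host bridge continues to use the list after setup; it is freed only on error.
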
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
index 51c930a..ae06d54 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_uc_offload.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -131,23 +131,23 @@
 	IPA_UC_OFFLOAD_DBG("register interface for netdev %s\n",
 					 inp->netdev_name);
 	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
+	param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
 	param.reg_params.user_data = ntn_ctx;
 	param.reg_params.notify_cb = ipa_uc_offload_rm_notify;
 	param.floor_voltage = IPA_VOLTAGE_SVS;
 	ret = ipa_rm_create_resource(&param);
 	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_PROD resource\n");
+		IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_PROD resource\n");
 		return -EFAULT;
 	}
 
 	memset(&param, 0, sizeof(param));
-	param.name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	param.name = IPA_RM_RESOURCE_ETHERNET_CONS;
 	param.request_resource = ipa_uc_ntn_cons_request;
 	param.release_resource = ipa_uc_ntn_cons_release;
 	ret = ipa_rm_create_resource(&param);
 	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to create ODU_ADAPT_CONS resource\n");
+		IPA_UC_OFFLOAD_ERR("fail to create ETHERNET_CONS resource\n");
 		goto fail_create_rm_cons;
 	}
 
@@ -177,13 +177,13 @@
 
 	memset(tx_prop, 0, sizeof(tx_prop));
 	tx_prop[0].ip = IPA_IP_v4;
-	tx_prop[0].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+	tx_prop[0].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
 	tx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
 	memcpy(tx_prop[0].hdr_name, hdr->hdr[IPA_IP_v4].name,
 		sizeof(tx_prop[0].hdr_name));
 
 	tx_prop[1].ip = IPA_IP_v6;
-	tx_prop[1].dst_pipe = IPA_CLIENT_ODU_TETH_CONS;
+	tx_prop[1].dst_pipe = IPA_CLIENT_ETHERNET_CONS;
 	tx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
 	memcpy(tx_prop[1].hdr_name, hdr->hdr[IPA_IP_v6].name,
 		sizeof(tx_prop[1].hdr_name));
@@ -194,7 +194,7 @@
 
 	memset(rx_prop, 0, sizeof(rx_prop));
 	rx_prop[0].ip = IPA_IP_v4;
-	rx_prop[0].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[0].src_pipe = IPA_CLIENT_ETHERNET_PROD;
 	rx_prop[0].hdr_l2_type = inp->hdr_info[0].hdr_type;
 	if (inp->is_meta_data_valid) {
 		rx_prop[0].attrib.attrib_mask |= IPA_FLT_META_DATA;
@@ -203,7 +203,7 @@
 	}
 
 	rx_prop[1].ip = IPA_IP_v6;
-	rx_prop[1].src_pipe = IPA_CLIENT_ODU_PROD;
+	rx_prop[1].src_pipe = IPA_CLIENT_ETHERNET_PROD;
 	rx_prop[1].hdr_l2_type = inp->hdr_info[1].hdr_type;
 	if (inp->is_meta_data_valid) {
 		rx_prop[1].attrib.attrib_mask |= IPA_FLT_META_DATA;
@@ -229,9 +229,9 @@
 fail:
 	kfree(hdr);
 fail_alloc:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS);
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS);
 fail_create_rm_cons:
-	ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
 	return ret;
 }
 
@@ -349,18 +349,18 @@
 		return -EINVAL;
 	}
 
-	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+	result = ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
 		IPA_RM_RESOURCE_APPS_CONS);
 	if (result) {
 		IPA_UC_OFFLOAD_ERR("fail to add rm dependency: %d\n", result);
 		return result;
 	}
 
-	result = ipa_rm_request_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	result = ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
 	if (result == -EINPROGRESS) {
 		if (wait_for_completion_timeout(&ntn_ctx->ntn_completion,
 			10*HZ) == 0) {
-			IPA_UC_OFFLOAD_ERR("ODU PROD resource req time out\n");
+			IPA_UC_OFFLOAD_ERR("ETH_PROD resource request timed out\n");
 			result = -EFAULT;
 			goto fail;
 		}
@@ -384,7 +384,7 @@
 	return 0;
 
 fail:
-	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+	ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
 		IPA_RM_RESOURCE_APPS_CONS);
 	return result;
 }
@@ -448,10 +448,10 @@
 	rm_profile.max_supported_bandwidth_mbps =
 		profile->max_supported_bw_mbps;
 
-	if (profile->client == IPA_CLIENT_ODU_PROD) {
-		resource_name = IPA_RM_RESOURCE_ODU_ADAPT_PROD;
-	} else if (profile->client == IPA_CLIENT_ODU_TETH_CONS) {
-		resource_name = IPA_RM_RESOURCE_ODU_ADAPT_CONS;
+	if (profile->client == IPA_CLIENT_ETHERNET_PROD) {
+		resource_name = IPA_RM_RESOURCE_ETHERNET_PROD;
+	} else if (profile->client == IPA_CLIENT_ETHERNET_CONS) {
+		resource_name = IPA_RM_RESOURCE_ETHERNET_CONS;
 	} else {
 		IPA_UC_OFFLOAD_ERR("not supported\n");
 		return -EINVAL;
@@ -473,22 +473,22 @@
 
 	ntn_ctx->state = IPA_UC_OFFLOAD_STATE_DOWN;
 
-	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD);
+	ret = ipa_rm_release_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
 	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to release ODU_ADAPT_PROD res: %d\n",
+		IPA_UC_OFFLOAD_ERR("fail to release ETHERNET_PROD res: %d\n",
 						  ret);
 		return -EFAULT;
 	}
 
-	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ODU_ADAPT_PROD,
+	ret = ipa_rm_delete_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
 		IPA_RM_RESOURCE_APPS_CONS);
 	if (ret) {
-		IPA_UC_OFFLOAD_ERR("fail to del dep ODU->APPS, %d\n", ret);
+		IPA_UC_OFFLOAD_ERR("fail to del dep ETH_PROD->APPS, %d\n", ret);
 		return -EFAULT;
 	}
 
-	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ODU_PROD);
-	ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ODU_TETH_CONS);
+	ipa_ep_idx_ul = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_PROD);
+	ipa_ep_idx_dl = ipa_get_ep_mapping(IPA_CLIENT_ETHERNET_CONS);
 	ret = ipa_tear_down_uc_offload_pipes(ipa_ep_idx_ul, ipa_ep_idx_dl);
 	if (ret) {
 		IPA_UC_OFFLOAD_ERR("fail to tear down ntn offload pipes, %d\n",
@@ -541,13 +541,13 @@
 	int len, result = 0;
 	struct ipa_ioc_del_hdr *hdr;
 
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_PROD)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_PROD resource\n");
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_PROD)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_PROD resource\n");
 		return -EFAULT;
 	}
 
-	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ODU_ADAPT_CONS)) {
-		IPA_UC_OFFLOAD_ERR("fail to delete ODU_ADAPT_CONS resource\n");
+	if (ipa_rm_delete_resource(IPA_RM_RESOURCE_ETHERNET_CONS)) {
+		IPA_UC_OFFLOAD_ERR("fail to delete ETHERNET_CONS resource\n");
 		return -EFAULT;
 	}
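
With the NTN offload retargeted from the ODU_ADAPT_* names to the dedicated Ethernet resources, an external Ethernet client drives power voting through ipa_rm exactly as this file now does. A hedged sketch of the producer-side bring-up (my_rm_notify_cb is a hypothetical callback; everything else is from the patch):

	struct ipa_rm_create_params param;

	memset(&param, 0, sizeof(param));
	param.name = IPA_RM_RESOURCE_ETHERNET_PROD;
	param.reg_params.notify_cb = my_rm_notify_cb; /* hypothetical */
	param.floor_voltage = IPA_VOLTAGE_SVS;
	if (ipa_rm_create_resource(&param))
		return -EFAULT;

	/* keep the APPS consumer powered while the producer is active */
	ipa_rm_add_dependency(IPA_RM_RESOURCE_ETHERNET_PROD,
			      IPA_RM_RESOURCE_APPS_CONS);
	ipa_rm_request_resource(IPA_RM_RESOURCE_ETHERNET_PROD);
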
 
diff --git a/drivers/platform/msm/ipa/ipa_rm.c b/drivers/platform/msm/ipa/ipa_rm.c
index 1431dcf..ea91b13 100644
--- a/drivers/platform/msm/ipa/ipa_rm.c
+++ b/drivers/platform/msm/ipa/ipa_rm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -28,6 +28,7 @@
 	__stringify(IPA_RM_RESOURCE_WLAN_PROD),
 	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_PROD),
 	__stringify(IPA_RM_RESOURCE_MHI_PROD),
+	__stringify(IPA_RM_RESOURCE_ETHERNET_PROD),
 	__stringify(IPA_RM_RESOURCE_Q6_CONS),
 	__stringify(IPA_RM_RESOURCE_USB_CONS),
 	__stringify(IPA_RM_RESOURCE_USB_DPL_CONS),
@@ -36,6 +37,7 @@
 	__stringify(IPA_RM_RESOURCE_APPS_CONS),
 	__stringify(IPA_RM_RESOURCE_ODU_ADAPT_CONS),
 	__stringify(IPA_RM_RESOURCE_MHI_CONS),
+	__stringify(IPA_RM_RESOURCE_ETHERNET_CONS),
 };
 
 struct ipa_rm_profile_vote_type {
diff --git a/drivers/platform/msm/ipa/ipa_rm_resource.c b/drivers/platform/msm/ipa/ipa_rm_resource.c
index 6657bd9..9e74a3f 100644
--- a/drivers/platform/msm/ipa/ipa_rm_resource.c
+++ b/drivers/platform/msm/ipa/ipa_rm_resource.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,7 @@
 	case IPA_RM_RESOURCE_WLAN_PROD:
 	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
 	case IPA_RM_RESOURCE_MHI_PROD:
+	case IPA_RM_RESOURCE_ETHERNET_PROD:
 		break;
 	default:
 		result = IPA_RM_INDEX_INVALID;
@@ -69,6 +70,7 @@
 	case IPA_RM_RESOURCE_ODU_ADAPT_CONS:
 	case IPA_RM_RESOURCE_MHI_CONS:
 	case IPA_RM_RESOURCE_USB_DPL_CONS:
+	case IPA_RM_RESOURCE_ETHERNET_CONS:
 		break;
 	default:
 		result = IPA_RM_INDEX_INVALID;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 964d6c8..3dca3e6 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -18,6 +18,7 @@
 #include "ipa_i.h"
 #include "ipa_trace.h"
 
+#define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_LAST_DESC_CNT 0xFFFF
 #define POLLING_INACTIVITY_RX 40
 #define POLLING_INACTIVITY_TX 40
@@ -1099,16 +1100,18 @@
 			break;
 
 		ipa_wq_rx_common(ep->sys, iov.size);
-		cnt += 5;
+		cnt += IPA_WAN_AGGR_PKT_CNT;
 	};
 
-	if (cnt == 0) {
+	if (cnt == 0 || cnt < weight) {
 		ep->inactive_cycles++;
 		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
 
 		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
 			ep->switch_to_intr = true;
 			delay = 0;
+		} else if (cnt < weight) {
+			delay = 0;
 		}
 		queue_delayed_work(ep->sys->wq,
 			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
@@ -3176,14 +3179,9 @@
 				sys->repl_hdlr =
 				   ipa_replenish_rx_cache;
 			}
-			if (in->napi_enabled) {
-				sys->rx_pool_sz =
-					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
-				if (in->recycle_enabled) {
-					sys->repl_hdlr =
-					   ipa_replenish_rx_cache_recycle;
-				}
-			}
+			if (in->napi_enabled && in->recycle_enabled)
+				sys->repl_hdlr =
+					ipa_replenish_rx_cache_recycle;
 			sys->ep->wakelock_client =
 			   IPA_WAKELOCK_REF_CLIENT_WAN_RX;
 			in->ipa_ep_cfg.aggr.aggr_sw_eof_active
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index 672c620..cd575fe 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -51,8 +51,6 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
index 2a68970..4b62927 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -538,6 +538,8 @@
 			mutex_unlock(&ipa_ctx->msg_lock);
 			if (copy_to_user(buf, &msg->meta,
 					  sizeof(struct ipa_msg_meta))) {
+				kfree(msg);
+				msg = NULL;
 				ret = -EFAULT;
 				break;
 			}
@@ -546,6 +548,8 @@
 			if (msg->buff) {
 				if (copy_to_user(buf, msg->buff,
 						  msg->meta.msg_len)) {
+					kfree(msg);
+					msg = NULL;
 					ret = -EFAULT;
 					break;
 				}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
index 78d67a5..a50665c 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_utils.c
@@ -94,6 +94,7 @@
 	[IPA_1_1][IPA_CLIENT_Q6_LAN_PROD]        =  5,
 	[IPA_1_1][IPA_CLIENT_Q6_WAN_PROD]        = -1,
 	[IPA_1_1][IPA_CLIENT_Q6_CMD_PROD]        = -1,
+	[IPA_1_1][IPA_CLIENT_ETHERNET_PROD]      = -1,
 
 	[IPA_1_1][IPA_CLIENT_HSIC1_CONS]         = 14,
 	[IPA_1_1][IPA_CLIENT_WLAN1_CONS]         = -1,
@@ -119,6 +120,7 @@
 	[IPA_1_1][IPA_CLIENT_MHI_CONS]           = -1,
 	[IPA_1_1][IPA_CLIENT_Q6_LAN_CONS]        =  4,
 	[IPA_1_1][IPA_CLIENT_Q6_WAN_CONS]        = -1,
+	[IPA_1_1][IPA_CLIENT_ETHERNET_CONS]      = -1,
 
 
 	[IPA_2_0][IPA_CLIENT_HSIC1_PROD]         = 12,
@@ -148,6 +150,7 @@
 						 =  12,
 	[IPA_2_0][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
 						 =  19,
+	[IPA_2_0][IPA_CLIENT_ETHERNET_PROD]      = 12,
 	/* Only for test purpose */
 	[IPA_2_0][IPA_CLIENT_TEST_PROD]          = 19,
 	[IPA_2_0][IPA_CLIENT_TEST1_PROD]         = 19,
@@ -188,6 +191,7 @@
 						 =  16,
 	[IPA_2_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
 						 =  10,
+	[IPA_2_0][IPA_CLIENT_ETHERNET_CONS]      = 1,
 	/* Only for test purpose */
 	[IPA_2_0][IPA_CLIENT_TEST_CONS]          = 1,
 	[IPA_2_0][IPA_CLIENT_TEST1_CONS]         = 1,
@@ -223,6 +227,7 @@
 						 =  -1,
 	[IPA_2_6L][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD]
 						 =  -1,
+	[IPA_2_6L][IPA_CLIENT_ETHERNET_PROD]      = -1,
 	/* Only for test purpose */
 	[IPA_2_6L][IPA_CLIENT_TEST_PROD]          = 11,
 	[IPA_2_6L][IPA_CLIENT_TEST1_PROD]         = 11,
@@ -263,6 +268,7 @@
 						 =  -1,
 	[IPA_2_6L][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]
 						 =  -1,
+	[IPA_2_6L][IPA_CLIENT_ETHERNET_CONS]      = -1,
 	/* Only for test purpose */
 	[IPA_2_6L][IPA_CLIENT_TEST_CONS]          = 15,
 	[IPA_2_6L][IPA_CLIENT_TEST1_CONS]         = 15,
@@ -457,6 +463,9 @@
 		clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
 		clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
 		break;
+	case IPA_RM_RESOURCE_ETHERNET_CONS:
+		clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
+		break;
 	case IPA_RM_RESOURCE_USB_PROD:
 		clients->names[i++] = IPA_CLIENT_USB_PROD;
 		break;
@@ -468,6 +477,10 @@
 		break;
 	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
 		clients->names[i++] = IPA_CLIENT_ODU_PROD;
+		break;
+	case IPA_RM_RESOURCE_ETHERNET_PROD:
+		clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
+		break;
 	default:
 		break;
 	}
@@ -507,7 +520,8 @@
 	    client == IPA_CLIENT_WLAN3_CONS   ||
 	    client == IPA_CLIENT_WLAN4_CONS   ||
 	    client == IPA_CLIENT_ODU_EMB_CONS ||
-	    client == IPA_CLIENT_ODU_TETH_CONS)
+	    client == IPA_CLIENT_ODU_TETH_CONS ||
+	    client == IPA_CLIENT_ETHERNET_CONS)
 		return true;
 
 	return false;
@@ -3630,7 +3644,8 @@
 	meta.qmap_id = param_in->qmap_id;
 	if (param_in->client == IPA_CLIENT_USB_PROD ||
 	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
-	    param_in->client == IPA_CLIENT_ODU_PROD) {
+	    param_in->client == IPA_CLIENT_ODU_PROD ||
+	    param_in->client == IPA_CLIENT_ETHERNET_PROD) {
 		result = ipa2_cfg_ep_metadata(ipa_ep_idx, &meta);
 	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
 		ipa_ctx->ep[ipa_ep_idx].cfg.meta = meta;
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index db732c5..0af9387 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -64,6 +64,7 @@
 #define IPA_UEVENT_NUM_EVNP 4 /* number of event pointers */
 
 #define NAPI_WEIGHT 60
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 1024
 
 static struct net_device *ipa_netdevs[IPA_WWAN_DEVICE_COUNT];
 static struct ipa_sys_connect_params apps_to_ipa_ep_cfg, ipa_to_apps_ep_cfg;
@@ -102,6 +103,7 @@
 	bool ipa_loaduC;
 	bool ipa_advertise_sg_support;
 	bool ipa_napi_enable;
+	u32 wan_rx_desc_size;
 };
 
 static struct ipa_rmnet_plat_drv_res ipa_rmnet_res;
@@ -1310,10 +1312,8 @@
 	ipa_to_apps_ep_cfg.priv = dev;
 
 	ipa_to_apps_ep_cfg.napi_enabled = ipa_rmnet_res.ipa_napi_enable;
-	if (ipa_to_apps_ep_cfg.napi_enabled)
-		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
-	else
-		ipa_to_apps_ep_cfg.desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	ipa_to_apps_ep_cfg.desc_fifo_sz =
+		ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);
 
 	mutex_lock(&ipa_to_apps_pipe_handle_guard);
 	if (atomic_read(&is_ssr)) {
@@ -1944,6 +1944,9 @@
 static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 		struct ipa_rmnet_plat_drv_res *ipa_rmnet_drv_res)
 {
+	int result;
+
+	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
 	ipa_rmnet_drv_res->ipa_rmnet_ssr =
 			of_property_read_bool(pdev->dev.of_node,
 			"qcom,rmnet-ipa-ssr");
@@ -1966,6 +1969,18 @@
 			"qcom,ipa-napi-enable");
 	pr_info("IPA Napi Enable = %s\n",
 		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+	/* Get IPA WAN RX desc fifo size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-desc-size",
+			&ipa_rmnet_drv_res->wan_rx_desc_size);
+	if (result)
+		pr_info("using default for wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+	else
+		IPAWANDBG("found wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+
 	return 0;
 }
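
The descriptor FIFO size is now a descriptor count taken from DT, multiplied by the element size, instead of a hard-coded byte value. With the defaults in this patch the arithmetic works out to:

	/* v2 path: 1024 descriptors * sizeof(struct sps_iovec) (8 B) = 8192 B */
	ipa_to_apps_ep_cfg.desc_fifo_sz =
		ipa_rmnet_res.wan_rx_desc_size * sizeof(struct sps_iovec);

A board that sets, say, qcom,wan-rx-desc-size = <2048>; in DT would get a 16 KB descriptor FIFO instead. The v3 driver further below performs the same computation with IPA_FIFO_ELEMENT_SIZE, which is likewise 8 bytes.
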
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index ca63518..5fd6dcc 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -4343,11 +4343,8 @@
 	}
 
 	ipa3_ctx->logbuf = ipc_log_context_create(IPA_IPC_LOG_PAGES, "ipa", 0);
-	if (ipa3_ctx->logbuf == NULL) {
-		IPAERR("failed to get logbuf\n");
-		result = -ENOMEM;
-		goto fail_logbuf;
-	}
+	if (ipa3_ctx->logbuf == NULL)
+		IPAERR("failed to create IPC log, continue...\n");
 
 	ipa3_ctx->pdev = ipa_dev;
 	ipa3_ctx->uc_pdev = ipa_dev;
@@ -4793,8 +4790,8 @@
 fail_mem_ctrl:
 	kfree(ipa3_ctx->ipa_tz_unlock_reg);
 fail_tz_unlock_reg:
-	ipc_log_context_destroy(ipa3_ctx->logbuf);
-fail_logbuf:
+	if (ipa3_ctx->logbuf)
+		ipc_log_context_destroy(ipa3_ctx->logbuf);
 	kfree(ipa3_ctx);
 	ipa3_ctx = NULL;
 fail_mem_ctx:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 3fb767c..a414029 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1213,12 +1213,7 @@
 			"TX bamFifoUsageLow=%u\n"
 			"TX bamUtilCount=%u\n"
 			"TX num_db=%u\n"
-			"TX num_unexpected_db=%u\n"
-			"TX num_bam_int_handled=%u\n"
-			"TX num_bam_int_in_non_running_state=%u\n"
-			"TX num_qmb_int_handled=%u\n"
-			"TX num_bam_int_handled_while_wait_for_bam=%u\n"
-			"TX num_bam_int_handled_while_not_in_bam=%u\n",
+			"TX num_qmb_int_handled=%u\n",
 			TX_STATS(num_pkts_processed),
 			TX_STATS(tail_ptr_val),
 			TX_STATS(num_db_fired),
@@ -1233,12 +1228,7 @@
 			TX_STATS(bam_stats.bamFifoUsageLow),
 			TX_STATS(bam_stats.bamUtilCount),
 			TX_STATS(num_db),
-			TX_STATS(num_unexpected_db),
-			TX_STATS(num_bam_int_handled),
-			TX_STATS(num_bam_int_in_non_running_state),
-			TX_STATS(num_qmb_int_handled),
-			TX_STATS(num_bam_int_handled_while_wait_for_bam),
-			TX_STATS(num_bam_int_handled_while_not_in_bam));
+			TX_STATS(num_qmb_int_handled));
 		cnt += nbytes;
 		nbytes = scnprintf(dbg_buff + cnt, IPA_MAX_MSG_LEN - cnt,
 			"RX max_outstanding_pkts=%u\n"
@@ -1254,12 +1244,7 @@
 			"RX bamFifoUsageHigh=%u\n"
 			"RX bamFifoUsageLow=%u\n"
 			"RX bamUtilCount=%u\n"
-			"RX num_bam_int_handled=%u\n"
-			"RX num_db=%u\n"
-			"RX num_unexpected_db=%u\n"
-			"RX num_pkts_in_dis_uninit_state=%u\n"
-			"num_ic_inj_vdev_change=%u\n"
-			"num_ic_inj_fw_desc_change=%u\n",
+			"RX num_db=%u\n",
 			RX_STATS(max_outstanding_pkts),
 			RX_STATS(num_pkts_processed),
 			RX_STATS(rx_ring_rp_value),
@@ -1273,12 +1258,7 @@
 			RX_STATS(bam_stats.bamFifoUsageHigh),
 			RX_STATS(bam_stats.bamFifoUsageLow),
 			RX_STATS(bam_stats.bamUtilCount),
-			RX_STATS(num_bam_int_handled),
-			RX_STATS(num_db),
-			RX_STATS(num_unexpected_db),
-			RX_STATS(num_pkts_in_dis_uninit_state),
-			RX_STATS(num_bam_int_handled_while_not_in_bam),
-			RX_STATS(num_bam_int_handled_while_in_bam_state));
+			RX_STATS(num_db));
 		cnt += nbytes;
 	} else {
 		nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 343cc14..3937cfe 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -21,6 +21,7 @@
 #include "ipahal/ipahal.h"
 #include "ipahal/ipahal_fltrt.h"
 
+#define IPA_WAN_AGGR_PKT_CNT 5
 #define IPA_LAST_DESC_CNT 0xFFFF
 #define POLLING_INACTIVITY_RX 40
 #define POLLING_MIN_SLEEP_RX 1010
@@ -60,7 +61,6 @@
 #define IPA_ODU_RX_POOL_SZ 64
 #define IPA_SIZE_DL_CSUM_META_TRAILER 8
 
-#define IPA_GSI_EVT_RING_LEN 4096
 #define IPA_GSI_MAX_CH_LOW_WEIGHT 15
 #define IPA_GSI_EVT_RING_INT_MODT (32 * 1) /* 1ms under 32KHz clock */
 
@@ -73,12 +73,6 @@
 
 #define IPA_TX_SEND_COMPL_NOP_DELAY_NS (2 * 1000 * 1000)
 
-/*
- * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
- * IPA users still use sps_iovec size as FIFO element size.
- */
-#define IPA_FIFO_ELEMENT_SIZE 8
-
 static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
 static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
 static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -787,14 +781,8 @@
 	sys = container_of(dwork, struct ipa3_sys_context, switch_to_intr_work);
 
 	if (sys->ep->napi_enabled) {
-		if (sys->ep->switch_to_intr) {
-			ipa3_rx_switch_to_intr_mode(sys);
-			IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
-			sys->ep->switch_to_intr = false;
-			sys->ep->inactive_cycles = 0;
-		} else
-			sys->ep->client_notify(sys->ep->priv,
-				IPA_CLIENT_START_POLL, 0);
+		ipa3_rx_switch_to_intr_mode(sys);
+		IPA_ACTIVE_CLIENTS_DEC_SPECIAL("NAPI");
 	} else
 		ipa3_handle_rx(sys);
 }
@@ -867,7 +855,8 @@
 		snprintf(buff, IPA_RESOURCE_NAME_MAX, "ipawq%d",
 				sys_in->client);
 		ep->sys->wq = alloc_workqueue(buff,
-				WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+				WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
+
 		if (!ep->sys->wq) {
 			IPAERR("failed to create wq for client %d\n",
 					sys_in->client);
@@ -878,7 +867,7 @@
 		snprintf(buff, IPA_RESOURCE_NAME_MAX, "iparepwq%d",
 				sys_in->client);
 		ep->sys->repl_wq = alloc_workqueue(buff,
-				WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
+				WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_SYSFS, 1);
 		if (!ep->sys->repl_wq) {
 			IPAERR("failed to create rep wq for client %d\n",
 					sys_in->client);
@@ -1029,7 +1018,6 @@
 
 	ipa3_disable_data_path(clnt_hdl);
 	if (ep->napi_enabled) {
-		ep->switch_to_intr = true;
 		do {
 			usleep_range(95, 105);
 		} while (atomic_read(&ep->sys->curr_polling_state));
@@ -2676,8 +2664,7 @@
 static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
 		struct ipa3_sys_context *sys)
 {
-	if (in->client == IPA_CLIENT_APPS_CMD_PROD ||
-		in->client == IPA_CLIENT_APPS_WAN_PROD) {
+	if (in->client == IPA_CLIENT_APPS_CMD_PROD) {
 		sys->policy = IPA_POLICY_INTR_MODE;
 		sys->use_comm_evt_ring = false;
 		return 0;
@@ -2742,9 +2729,6 @@
 					sys->repl_hdlr =
 					   ipa3_replenish_rx_cache;
 				}
-				if (in->napi_enabled)
-					sys->rx_pool_sz =
-					   IPA_WAN_NAPI_CONS_RX_POOL_SZ;
 				if (in->napi_enabled && in->recycle_enabled)
 					sys->repl_hdlr =
 					 ipa3_replenish_rx_cache_recycle;
@@ -3441,7 +3425,13 @@
 		gsi_evt_ring_props.re_size =
 			GSI_EVT_RING_RE_SIZE_16B;
 
+		/*
+		 * GSI ring length is calculated based on the desc_fifo_sz
+		 * which was meant to define the BAM desc fifo. GSI descriptors
+		 * are 16B as opposed to 8B for BAM.
+		 */
 		gsi_evt_ring_props.ring_len = 2 * in->desc_fifo_sz;
+
 		gsi_evt_ring_props.ring_base_vaddr =
 			dma_alloc_coherent(ipa3_ctx->pdev,
 			gsi_evt_ring_props.ring_len,
@@ -3657,7 +3647,7 @@
 * function is executed in the softirq context
  *
  * if input budget is zero, the driver switches back to
- * interrupt mode
+ * interrupt mode.
  *
  * return number of polled packets, on error 0(zero)
  */
@@ -3666,8 +3656,8 @@
 	struct ipa3_ep_context *ep;
 	int ret;
 	int cnt = 0;
-	unsigned int delay = 1;
 	struct ipa_mem_buffer mem_info = {0};
+	static int total_cnt;
 
 	IPADBG("\n");
 	if (clnt_hdl >= ipa3_ctx->ipa_num_pipes ||
@@ -3686,21 +3676,20 @@
 			break;
 
 		ipa3_wq_rx_common(ep->sys, mem_info.size);
-		cnt += 5;
+		cnt += IPA_WAN_AGGR_PKT_CNT;
+		total_cnt++;
+
+		if (ep->sys->len == 0 || total_cnt >= ep->sys->rx_pool_sz) {
+			total_cnt = 0;
+			cnt--;
+			break;
+		}
 	};
 
-	if (cnt == 0) {
-		ep->inactive_cycles++;
+	if (cnt < weight) {
 		ep->client_notify(ep->priv, IPA_CLIENT_COMP_NAPI, 0);
-
-		if (ep->inactive_cycles > 3 || ep->sys->len == 0) {
-			ep->switch_to_intr = true;
-			delay = 0;
-		}
-		queue_delayed_work(ep->sys->wq,
-			&ep->sys->switch_to_intr_work, msecs_to_jiffies(delay));
-	} else
-		ep->inactive_cycles = 0;
+		queue_work(ep->sys->wq, &ep->sys->switch_to_intr_work.work);
+	}
 
 	return cnt;
 }
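
The rx_poll rework above brings the driver in line with the standard NAPI contract: consume up to weight credits per invocation, and only when fewer than weight are consumed hand the pipe back to interrupt mode (here via switch_to_intr_work). For orientation, the generic shape of that contract, as a sketch with placeholder helpers rather than IPA code:

	static int my_poll(struct napi_struct *napi, int budget)
	{
		int done = 0;

		while (done < budget && rx_work_pending())  /* placeholder */
			done += process_one(napi);          /* placeholder */

		if (done < budget) {
			napi_complete(napi);    /* leave polling mode */
			enable_rx_irq();        /* placeholder: rearm IRQ */
		}
		return done;
	}

Returning budget keeps the core calling poll again; returning less promises the IRQ has been rearmed, which is why the patch queues switch_to_intr_work whenever cnt < weight.
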
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 7419a64..8261a26 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -54,8 +54,11 @@
 #define IPA_UC_FINISH_MAX 6
 #define IPA_UC_WAIT_MIN_SLEEP 1000
 #define IPA_UC_WAII_MAX_SLEEP 1200
-#define IPA_WAN_NAPI_CONS_RX_POOL_SZ (IPA_GENERIC_RX_POOL_SZ*3)
-#define IPA_WAN_CONS_DESC_FIFO_SZ (IPA_SYS_DESC_FIFO_SZ*3)
+/*
+ * The transport descriptor size was changed to GSI_CHAN_RE_SIZE_16B, but
+ * IPA users still use sps_iovec size as FIFO element size.
+ */
+#define IPA_FIFO_ELEMENT_SIZE 8
 
 #define IPA_MAX_STATUS_STAT_NUM 30
 
@@ -528,8 +531,6 @@
 	bool disconnect_in_progress;
 	u32 qmi_request_sent;
 	bool napi_enabled;
-	bool switch_to_intr;
-	int inactive_cycles;
 	u32 eot_in_poll_err;
 
 	/* sys MUST be the last element of this struct */
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
index b9f5755..da965e7 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_intf.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -548,6 +548,8 @@
 			if (copy_to_user(buf, &msg->meta,
 					  sizeof(struct ipa_msg_meta))) {
 				ret = -EFAULT;
+				kfree(msg);
+				msg = NULL;
 				break;
 			}
 			buf += sizeof(struct ipa_msg_meta);
@@ -556,6 +558,8 @@
 				if (copy_to_user(buf, msg->buff,
 						  msg->meta.msg_len)) {
 					ret = -EFAULT;
+					kfree(msg);
+					msg = NULL;
 					break;
 				}
 				buf += msg->meta.msg_len;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
index 30243da..ce47623 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_ntn.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -117,12 +117,7 @@
 	TX_STATS(bam_stats.bamFifoUsageLow);
 	TX_STATS(bam_stats.bamUtilCount);
 	TX_STATS(num_db);
-	TX_STATS(num_unexpected_db);
-	TX_STATS(num_bam_int_handled);
-	TX_STATS(num_bam_int_in_non_running_state);
 	TX_STATS(num_qmb_int_handled);
-	TX_STATS(num_bam_int_handled_while_wait_for_bam);
-	TX_STATS(num_bam_int_handled_while_not_in_bam);
 
 	RX_STATS(max_outstanding_pkts);
 	RX_STATS(num_pkts_processed);
@@ -137,12 +132,7 @@
 	RX_STATS(bam_stats.bamFifoUsageHigh);
 	RX_STATS(bam_stats.bamFifoUsageLow);
 	RX_STATS(bam_stats.bamUtilCount);
-	RX_STATS(num_bam_int_handled);
 	RX_STATS(num_db);
-	RX_STATS(num_unexpected_db);
-	RX_STATS(num_pkts_in_dis_uninit_state);
-	RX_STATS(num_bam_int_handled_while_not_in_bam);
-	RX_STATS(num_bam_int_handled_while_in_bam_state);
 
 	IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
 
@@ -253,7 +243,8 @@
 	ep_dl = &ipa3_ctx->ep[ipa_ep_idx_dl];
 
 	if (ep_ul->valid || ep_dl->valid) {
-		IPAERR("EP already allocated.\n");
+		IPAERR("EP already allocated ul:%d dl:%d\n",
+			   ep_ul->valid, ep_dl->valid);
 		return -EFAULT;
 	}
 
@@ -398,7 +389,7 @@
 		goto fail;
 	}
 	ipa3_delete_dflt_flt_rules(ipa_ep_idx_ul);
-	memset(&ipa3_ctx->ep[ipa_ep_idx_dl], 0, sizeof(struct ipa3_ep_context));
+	memset(&ipa3_ctx->ep[ipa_ep_idx_ul], 0, sizeof(struct ipa3_ep_context));
 	IPADBG("ul client (ep: %d) disconnected\n", ipa_ep_idx_ul);
 
 fail:
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
index 946fc7e..79f0973 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc_offload_i.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -388,18 +388,9 @@
  *@num_pkts_processed: Number of packets processed - cumulative
  *@rx_ring_rp_value: Read pointer last advertized to the WLAN FW
  *
- *@ntn_ch_err_type: Information about the channel error (if
- *		available)
  *@rx_ind_ring_stats:
  *@bam_stats:
- *@num_bam_int_handled: Number of Bam Interrupts handled by FW
  *@num_db: Number of times the doorbell was rung
- *@num_unexpected_db: Number of unexpected doorbells
- *@num_pkts_in_dis_uninit_state:
- *@num_bam_int_handled_while_not_in_bam: Number of Bam
- *		Interrupts handled by FW
- *@num_bam_int_handled_while_in_bam_state: Number of Bam
- *   Interrupts handled by FW
  */
 struct NTN3RxInfoData_t {
 	u32  max_outstanding_pkts;
@@ -407,17 +398,12 @@
 	u32  rx_ring_rp_value;
 	struct IpaHwRingStats_t rx_ind_ring_stats;
 	struct IpaHwBamStats_t bam_stats;
-	u32  num_bam_int_handled;
 	u32  num_db;
-	u32  num_unexpected_db;
-	u32  num_pkts_in_dis_uninit_state;
-	u32  num_bam_int_handled_while_not_in_bam;
-	u32  num_bam_int_handled_while_in_bam_state;
 } __packed;
 
 
 /**
- * struct NTNTxInfoData_t - Structure holding the NTN Tx channel
+ * struct NTN3TxInfoData_t - Structure holding the NTN Tx channel
  * Ensure that this is always word aligned
  *
  *@num_pkts_processed: Number of packets processed - cumulative
@@ -427,27 +413,16 @@
  *@tx_comp_ring_stats:
  *@bam_stats:
  *@num_db: Number of times the doorbell was rung
- *@num_unexpected_db: Number of unexpected doorbells
- *@num_bam_int_handled: Number of Bam Interrupts handled by FW
- *@num_bam_int_in_non_running_state: Number of Bam interrupts
- *			while not in Running state
  *@num_qmb_int_handled: Number of QMB interrupts handled
- *@num_bam_int_handled_while_wait_for_bam: Number of times the
- *		Imm Cmd is injected due to fw_desc change
  */
-struct NTNTxInfoData_t {
+struct NTN3TxInfoData_t {
 	u32  num_pkts_processed;
 	u32  tail_ptr_val;
 	u32  num_db_fired;
 	struct IpaHwRingStats_t tx_comp_ring_stats;
 	struct IpaHwBamStats_t bam_stats;
 	u32  num_db;
-	u32  num_unexpected_db;
-	u32  num_bam_int_handled;
-	u32  num_bam_int_in_non_running_state;
 	u32  num_qmb_int_handled;
-	u32  num_bam_int_handled_while_wait_for_bam;
-	u32  num_bam_int_handled_while_not_in_bam;
 } __packed;
 
 
@@ -458,7 +433,7 @@
  */
 struct Ipa3HwStatsNTNInfoData_t {
 	struct NTN3RxInfoData_t rx_ch_stats[IPA_UC_MAX_NTN_RX_CHANNELS];
-	struct NTNTxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
+	struct NTN3TxInfoData_t tx_ch_stats[IPA_UC_MAX_NTN_TX_CHANNELS];
 } __packed;
 
 
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 6cfe25d..bc9f693 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -392,6 +392,11 @@
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_PCIE,
 			{ 13, 10, 8, 16, IPA_EE_AP } },
+	[IPA_3_0][IPA_CLIENT_ETHERNET_PROD]          = {
+			2, IPA_v3_0_GROUP_UL, true,
+			IPA_DPS_HPS_SEQ_TYPE_2ND_PKT_PROCESS_PASS_NO_DEC_UCP,
+			QMB_MASTER_SELECT_DDR,
+			{2, 0, 8, 16, IPA_EE_UC} },
 	/* Only for test purpose */
 	[IPA_3_0][IPA_CLIENT_TEST_PROD]           = {
 			1, IPA_v3_0_GROUP_UL, true,
@@ -517,6 +522,11 @@
 			QMB_MASTER_SELECT_PCIE,
 			{ 29, 14, 8, 8, IPA_EE_AP } },
 	[IPA_3_0][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_0][IPA_CLIENT_ETHERNET_CONS]          = {
+			24, IPA_v3_0_GROUP_DL, false,
+			IPA_DPS_HPS_SEQ_TYPE_INVALID,
+			QMB_MASTER_SELECT_DDR,
+			{24, 3, 8, 8, IPA_EE_UC} },
 	/* Only for test purpose */
 	[IPA_3_0][IPA_CLIENT_TEST_CONS]           = {
 			26, IPA_v3_0_GROUP_DL, false,
@@ -604,6 +614,7 @@
 	[IPA_3_5][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5][IPA_CLIENT_ETHERNET_PROD]         = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5][IPA_CLIENT_TEST_PROD]           = {
 			0, IPA_v3_5_GROUP_UL_DL, true,
@@ -701,6 +712,7 @@
 	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]     = IPA_CLIENT_NOT_USED,
+	[IPA_3_5][IPA_CLIENT_ETHERNET_CONS]	  = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	/* MBIM aggregation test pipes should have the same QMB as USB_CONS */
 	[IPA_3_5][IPA_CLIENT_TEST_CONS]           = {
@@ -792,6 +804,7 @@
 			IPA_DPS_HPS_SEQ_TYPE_DMA_ONLY,
 			QMB_MASTER_SELECT_DDR,
 			{ 8, 9, 8, 16, IPA_EE_AP } },
+	[IPA_3_5_MHI][IPA_CLIENT_ETHERNET_PROD]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_MHI][IPA_CLIENT_TEST_PROD]           = {
 			0, IPA_v3_5_MHI_GROUP_DDR, true,
@@ -889,6 +902,7 @@
 			QMB_MASTER_SELECT_PCIE,
 			{ 19, 13, 8, 8, IPA_EE_AP } },
 	[IPA_3_5_MHI][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS]	= IPA_CLIENT_NOT_USED,
+	[IPA_3_5_MHI][IPA_CLIENT_ETHERNET_CONS]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_MHI][IPA_CLIENT_TEST_CONS]           = {
 			15, IPA_v3_5_MHI_GROUP_PCIE, false,
@@ -975,6 +989,7 @@
 	[IPA_3_5_1][IPA_CLIENT_Q6_DECOMP2_PROD]     = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_PROD] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_PROD] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_ETHERNET_PROD]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_1][IPA_CLIENT_TEST_PROD]           = {
 			0, IPA_v3_5_GROUP_UL_DL, true,
@@ -1068,6 +1083,7 @@
 	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_SYNC_CONS]  = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS] = IPA_CLIENT_NOT_USED,
 	[IPA_3_5_1][IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS] = IPA_CLIENT_NOT_USED,
+	[IPA_3_5_1][IPA_CLIENT_ETHERNET_CONS]       = IPA_CLIENT_NOT_USED,
 	/* Only for test purpose */
 	[IPA_3_5_1][IPA_CLIENT_TEST_CONS]           = {
 			17, IPA_v3_5_GROUP_UL_DL,
@@ -1231,6 +1247,9 @@
 		clients->names[i++] = IPA_CLIENT_ODU_EMB_CONS;
 		clients->names[i++] = IPA_CLIENT_ODU_TETH_CONS;
 		break;
+	case IPA_RM_RESOURCE_ETHERNET_CONS:
+		clients->names[i++] = IPA_CLIENT_ETHERNET_CONS;
+		break;
 	case IPA_RM_RESOURCE_USB_PROD:
 		clients->names[i++] = IPA_CLIENT_USB_PROD;
 		break;
@@ -1242,6 +1261,10 @@
 		break;
 	case IPA_RM_RESOURCE_ODU_ADAPT_PROD:
 		clients->names[i++] = IPA_CLIENT_ODU_PROD;
+		break;
+	case IPA_RM_RESOURCE_ETHERNET_PROD:
+		clients->names[i++] = IPA_CLIENT_ETHERNET_PROD;
+		break;
 	default:
 		break;
 	}
@@ -1282,7 +1305,8 @@
 	    client == IPA_CLIENT_WLAN3_CONS   ||
 	    client == IPA_CLIENT_WLAN4_CONS   ||
 	    client == IPA_CLIENT_ODU_EMB_CONS ||
-	    client == IPA_CLIENT_ODU_TETH_CONS)
+	    client == IPA_CLIENT_ODU_TETH_CONS ||
+	    client == IPA_CLIENT_ETHERNET_CONS)
 		return true;
 
 	return false;
@@ -2742,7 +2766,8 @@
 	meta.qmap_id = param_in->qmap_id;
 	if (param_in->client == IPA_CLIENT_USB_PROD ||
 	    param_in->client == IPA_CLIENT_HSIC1_PROD ||
-	    param_in->client == IPA_CLIENT_ODU_PROD) {
+	    param_in->client == IPA_CLIENT_ODU_PROD ||
+	    param_in->client == IPA_CLIENT_ETHERNET_PROD) {
 		result = ipa3_cfg_ep_metadata(ipa_ep_idx, &meta);
 	} else if (param_in->client == IPA_CLIENT_WLAN1_PROD) {
 		ipa3_ctx->ep[ipa_ep_idx].cfg.meta = meta;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
index 3c8688e7..78fd90b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipahal/ipahal_reg.c
@@ -81,6 +81,9 @@
 	__stringify(IPA_QSB_MAX_WRITES),
 	__stringify(IPA_QSB_MAX_READS),
 	__stringify(IPA_TX_CFG),
+	__stringify(IPA_IDLE_INDICATION_CFG),
+	__stringify(IPA_DPS_SEQUENCER_FIRST),
+	__stringify(IPA_HPS_SEQUENCER_FIRST),
 };
 
 static void ipareg_construct_dummy(enum ipahal_reg_name reg,
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index cf9775b..cb25b09 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -66,6 +66,7 @@
 	((rmnet_ipa3_ctx && rmnet_ipa3_ctx->wwan_priv) ? \
 	  rmnet_ipa3_ctx->wwan_priv->net : NULL)
 
+#define IPA_WWAN_CONS_DESC_FIFO_SZ 256
 
 static int ipa3_wwan_add_ul_flt_rule_to_ipa(void);
 static int ipa3_wwan_del_ul_flt_rule_to_ipa(void);
@@ -90,6 +91,7 @@
 	bool ipa_loaduC;
 	bool ipa_advertise_sg_support;
 	bool ipa_napi_enable;
+	u32 wan_rx_desc_size;
 };
 
 /**
@@ -1274,7 +1276,6 @@
 {
 	int ret = 0;
 	struct ipa_sys_connect_params *ipa_wan_ep_cfg;
-	struct rmnet_phys_ep_conf_s *ep_cfg;
 
 	IPAWANDBG("Get RMNET_IOCTL_SET_INGRESS_DATA_FORMAT\n");
 	ipa_wan_ep_cfg = &rmnet_ipa3_ctx->ipa_to_apps_ep_cfg;
@@ -1296,14 +1297,6 @@
 			   in->u.ingress_format.agg_size;
 			ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
 			   in->u.ingress_format.agg_count;
-
-			if (ipa_wan_ep_cfg->napi_enabled) {
-				ipa_wan_ep_cfg->recycle_enabled = true;
-				ep_cfg = (struct rmnet_phys_ep_conf_s *)
-				   rcu_dereference(dev->rx_handler_data);
-				ep_cfg->recycle = ipa_recycle_wan_skb;
-				pr_info("Wan Recycle Enabled\n");
-			}
 		}
 	}
 
@@ -1325,10 +1318,8 @@
 	ipa_wan_ep_cfg->priv = dev;
 
 	ipa_wan_ep_cfg->napi_enabled = ipa3_rmnet_res.ipa_napi_enable;
-	if (ipa_wan_ep_cfg->napi_enabled)
-		ipa_wan_ep_cfg->desc_fifo_sz = IPA_WAN_CONS_DESC_FIFO_SZ;
-	else
-		ipa_wan_ep_cfg->desc_fifo_sz = IPA_SYS_DESC_FIFO_SZ;
+	ipa_wan_ep_cfg->desc_fifo_sz =
+		ipa3_rmnet_res.wan_rx_desc_size * IPA_FIFO_ELEMENT_SIZE;
 
 	mutex_lock(&rmnet_ipa3_ctx->pipe_handle_guard);
 
@@ -2012,6 +2003,9 @@
 static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
 		struct ipa3_rmnet_plat_drv_res *ipa_rmnet_drv_res)
 {
+	int result;
+
+	ipa_rmnet_drv_res->wan_rx_desc_size = IPA_WWAN_CONS_DESC_FIFO_SZ;
 	ipa_rmnet_drv_res->ipa_rmnet_ssr =
 			of_property_read_bool(pdev->dev.of_node,
 			"qcom,rmnet-ipa-ssr");
@@ -2034,6 +2028,18 @@
 			"qcom,ipa-napi-enable");
 	pr_info("IPA Napi Enable = %s\n",
 		ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
+
+	/* Get IPA WAN RX desc fifo size */
+	result = of_property_read_u32(pdev->dev.of_node,
+			"qcom,wan-rx-desc-size",
+			&ipa_rmnet_drv_res->wan_rx_desc_size);
+	if (result)
+		pr_info("using default for wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+	else
+		IPAWANDBG("found wan-rx-desc-size = %u\n",
+				ipa_rmnet_drv_res->wan_rx_desc_size);
+
 	return 0;
 }
 
@@ -2351,32 +2357,41 @@
 {
 	struct net_device *netdev = IPA_NETDEV();
 	struct ipa3_wwan_private *wwan_ptr;
+	int ret;
 
-	IPAWANDBG_LOW("Enter...\n");
+	IPAWANDBG("Enter...\n");
+
 	if (netdev == NULL) {
 		IPAWANERR("netdev is NULL.\n");
-		return 0;
+		ret = 0;
+		goto bail;
 	}
 
+	netif_tx_lock_bh(netdev);
 	wwan_ptr = netdev_priv(netdev);
 	if (wwan_ptr == NULL) {
 		IPAWANERR("wwan_ptr is NULL.\n");
-		return 0;
+		ret = 0;
+		goto unlock_and_bail;
 	}
 
 	/* Do not allow A7 to suspend in case there are outstanding packets */
 	if (atomic_read(&wwan_ptr->outstanding_pkts) != 0) {
 		IPAWANDBG("Outstanding packets, postponing AP suspend.\n");
-		return -EAGAIN;
+		ret = -EAGAIN;
+		goto unlock_and_bail;
 	}
 
 	/* Make sure that there is no Tx operation ongoing */
-	netif_tx_lock_bh(netdev);
+	netif_stop_queue(netdev);
 	ipa_rm_release_resource(IPA_RM_RESOURCE_WWAN_0_PROD);
-	netif_tx_unlock_bh(netdev);
-	IPAWANDBG_LOW("Exit\n");
+	ret = 0;
 
-	return 0;
+unlock_and_bail:
+	netif_tx_unlock_bh(netdev);
+bail:
+	IPAWANDBG("Exit with %d\n", ret);
+	return ret;
 }
 
 /**
@@ -2393,10 +2408,10 @@
 {
 	struct net_device *netdev = IPA_NETDEV();
 
-	IPAWANDBG_LOW("Enter...\n");
+	IPAWANDBG("Enter...\n");
 	if (netdev)
 		netif_wake_queue(netdev);
-	IPAWANDBG_LOW("Exit\n");
+	IPAWANDBG("Exit\n");
 
 	return 0;
 }
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index f6fa78f..46dc148 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -278,6 +278,7 @@
 	POWER_SUPPLY_ATTR(dp_dm),
 	POWER_SUPPLY_ATTR(input_current_limited),
 	POWER_SUPPLY_ATTR(input_current_now),
+	POWER_SUPPLY_ATTR(charge_qnovo_enable),
 	POWER_SUPPLY_ATTR(current_qnovo),
 	POWER_SUPPLY_ATTR(voltage_qnovo),
 	POWER_SUPPLY_ATTR(rerun_aicl),
@@ -306,6 +307,7 @@
 	POWER_SUPPLY_ATTR(die_health),
 	POWER_SUPPLY_ATTR(connector_health),
 	POWER_SUPPLY_ATTR(ctm_current_max),
+	POWER_SUPPLY_ATTR(hw_current_max),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 3659b92..b985ecd 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
 
 #include <linux/device.h>
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -24,7 +25,7 @@
 #include <linux/printk.h>
 #include <linux/pm_wakeup.h>
 #include <linux/slab.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define DRV_MAJOR_VERSION	1
 #define DRV_MINOR_VERSION	0
@@ -36,6 +37,7 @@
 #define PL_HW_ABSENT_VOTER		"PL_HW_ABSENT_VOTER"
 #define PL_VOTER			"PL_VOTER"
 #define RESTRICT_CHG_VOTER		"RESTRICT_CHG_VOTER"
+#define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -49,14 +51,16 @@
 	struct votable		*pl_disable_votable;
 	struct votable		*pl_awake_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*usb_icl_votable;
 	struct work_struct	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct delayed_work	pl_taper_work;
 	struct power_supply	*main_psy;
 	struct power_supply	*pl_psy;
 	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
 	int			charge_type;
-	int			main_settled_ua;
+	int			total_settled_ua;
 	int			pl_settled_ua;
 	struct class		qcom_batt_class;
 	struct wakeup_source	*pl_ws;
@@ -92,15 +96,10 @@
  ********/
 static void split_settled(struct pl_data *chip)
 {
-	int slave_icl_pct;
+	int slave_icl_pct, total_current_ua;
 	int slave_ua = 0, main_settled_ua = 0;
 	union power_supply_propval pval = {0, };
-	int rc;
-
-	/* TODO some parallel chargers do not have a fine ICL resolution. For
-	 * them implement a psy interface which returns the closest lower ICL
-	 * for desired split
-	 */
+	int rc, total_settled_ua = 0;
 
 	if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
 		&& (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
@@ -122,12 +121,31 @@
 		slave_icl_pct = max(0, chip->slave_pct - 10);
 		slave_ua = ((main_settled_ua + chip->pl_settled_ua)
 						* slave_icl_pct) / 100;
+		total_settled_ua = main_settled_ua + chip->pl_settled_ua;
 	}
 
-	/* ICL_REDUCTION on main could be 0mA when pl is disabled */
-	pval.intval = slave_ua;
+	total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+	if (total_current_ua < 0) {
+		if (!chip->usb_psy)
+			chip->usb_psy = power_supply_get_by_name("usb");
+		if (!chip->usb_psy) {
+			pr_err("Couldn't get usbpsy while splitting settled\n");
+			return;
+		}
+		/* no client is voting, so get the total current from charger */
+		rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_HW_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get max current rc=%d\n", rc);
+			return;
+		}
+		total_current_ua = pval.intval;
+	}
+
+	pval.intval = total_current_ua - slave_ua;
+	/* Set ICL on main charger */
 	rc = power_supply_set_property(chip->main_psy,
-			POWER_SUPPLY_PROP_ICL_REDUCTION, &pval);
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
 	if (rc < 0) {
 		pr_err("Couldn't change slave suspend state rc=%d\n", rc);
 		return;
@@ -142,10 +160,12 @@
 		return;
 	}
 
-	/* main_settled_ua represents the total capability of adapter */
-	if (!chip->main_settled_ua)
-		chip->main_settled_ua = main_settled_ua;
+	chip->total_settled_ua = total_settled_ua;
 	chip->pl_settled_ua = slave_ua;
+
+	pl_dbg(chip, PR_PARALLEL,
+		"Split total_current_ua=%d main_settled_ua=%d slave_ua=%d\n",
+		total_current_ua, main_settled_ua, slave_ua);
 }
 
 static ssize_t version_show(struct class *c, struct class_attribute *attr,
@@ -213,6 +233,10 @@
 
 	chip->restricted_charging_enabled = !!val;
 
+	/* disable parallel charger in case of restricted charging */
+	vote(chip->pl_disable_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled, 0);
+
 	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
 				chip->restricted_charging_enabled,
 				chip->restricted_current);
@@ -487,6 +511,59 @@
 	return 0;
 }
 
+#define ICL_STEP_UA	25000
+static int usb_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	int rc;
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (client == NULL)
+		icl_ua = INT_MAX;
+
+	/*
+	 * Disable parallel for new ICL vote - the call to split_settled will
+	 * ensure that all the input current limit gets assigned to the main
+	 * charger.
+	 */
+	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
+
+	/* rerun AICL */
+	/* get the settled current */
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return rc;
+	}
+
+	/* rerun AICL if new ICL is above settled ICL */
+	if (icl_ua > pval.intval) {
+		/* set a lower ICL */
+		pval.intval = max(pval.intval - ICL_STEP_UA, ICL_STEP_UA);
+		power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX,
+				&pval);
+		/* wait for ICL change */
+		msleep(100);
+
+		pval.intval = icl_ua;
+		power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX,
+				&pval);
+		/* wait for ICL change */
+		msleep(100);
+	}
+	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
+
+	return 0;
+}
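+
+/*
+ * The new USB_ICL votable aggregates client requests with VOTE_MIN
+ * semantics; the callback above then reruns AICL whenever the effective
+ * limit changes. A hedged usage sketch from a would-be client
+ * ("THERMAL_VOTER" is a hypothetical client name; find_votable()/vote()
+ * are the existing pmic-voter API):
+ *
+ *	struct votable *icl = find_votable("USB_ICL");
+ *
+ *	if (icl)
+ *		vote(icl, "THERMAL_VOTER", true, 1500000); // cap at 1.5 A
+ *
+ *	// withdraw later; with no active clients the callback sees
+ *	// client == NULL and falls back to INT_MAX
+ *	vote(icl, "THERMAL_VOTER", false, 0);
+ */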
+
 static void pl_disable_forever_work(struct work_struct *work)
 {
 	struct pl_data *chip = container_of(work,
@@ -508,7 +585,7 @@
 	int rc;
 
 	chip->taper_pct = 100;
-	chip->main_settled_ua = 0;
+	chip->total_settled_ua = 0;
 	chip->pl_settled_ua = 0;
 
 	if (!pl_disable) { /* enable */
@@ -596,13 +673,15 @@
 
 static bool is_main_available(struct pl_data *chip)
 {
-	if (!chip->main_psy)
-		chip->main_psy = power_supply_get_by_name("main");
+	if (chip->main_psy)
+		return true;
 
-	if (!chip->main_psy)
-		return false;
+	chip->main_psy = power_supply_get_by_name("main");
 
-	return true;
+	if (chip->main_psy)
+		rerun_election(chip->usb_icl_votable);
+
+	return !!chip->main_psy;
 }
 
 static bool is_batt_available(struct pl_data *chip)
@@ -711,6 +790,7 @@
 static void handle_settled_icl_change(struct pl_data *chip)
 {
 	union power_supply_propval pval = {0, };
+	int new_total_settled_ua;
 	int rc;
 
 	if (get_effective_result(chip->pl_disable_votable))
@@ -730,9 +810,15 @@
 			return;
 		}
 
+		new_total_settled_ua = pval.intval + chip->pl_settled_ua;
+		pl_dbg(chip, PR_PARALLEL,
+			"total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
+			chip->total_settled_ua, pval.intval,
+			new_total_settled_ua);
+
 		/* If ICL change is small skip splitting */
-		if (abs((chip->main_settled_ua - chip->pl_settled_ua)
-				- pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
+		if (abs(new_total_settled_ua - chip->total_settled_ua)
+						> MIN_ICL_CHANGE_DELTA_UA)
 			split_settled(chip);
 	} else {
 		rerun_election(chip->fcc_votable);
@@ -855,6 +941,14 @@
 		goto destroy_votable;
 	}
 
+	chip->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
+					usb_icl_vote_callback,
+					chip);
+	if (IS_ERR(chip->usb_icl_votable)) {
+		rc = PTR_ERR(chip->usb_icl_votable);
+		goto destroy_votable;
+	}
+
 	chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
 					pl_disable_vote_callback,
 					chip);
@@ -909,6 +1003,7 @@
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
 	destroy_votable(chip->fcc_votable);
+	destroy_votable(chip->usb_icl_votable);
 release_wakeup_source:
 	wakeup_source_unregister(chip->pl_ws);
 cleanup:
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index c0ba5a9..48fe04f 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -29,7 +29,7 @@
 #include <linux/string_helpers.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define fg_dbg(chip, reason, fmt, ...)			\
 	do {							\
@@ -46,10 +46,13 @@
 			&& (value) <= (right)))
 
 /* Awake votable reasons */
-#define SRAM_READ	"fg_sram_read"
-#define SRAM_WRITE	"fg_sram_write"
-#define PROFILE_LOAD	"fg_profile_load"
-#define DELTA_SOC	"fg_delta_soc"
+#define SRAM_READ		"fg_sram_read"
+#define SRAM_WRITE		"fg_sram_write"
+#define PROFILE_LOAD		"fg_profile_load"
+#define DELTA_SOC		"fg_delta_soc"
+
+/* Delta BSOC votable reasons */
+#define DELTA_BSOC_IRQ_VOTER	"fg_delta_bsoc_irq"
 
 #define DEBUG_PRINT_BUFFER_SIZE		64
 /* 3 byte address + 1 space character */
@@ -159,6 +162,7 @@
 	FG_SRAM_ESR_TIMER_DISCHG_INIT,
 	FG_SRAM_ESR_TIMER_CHG_MAX,
 	FG_SRAM_ESR_TIMER_CHG_INIT,
+	FG_SRAM_ESR_PULSE_THRESH,
 	FG_SRAM_SYS_TERM_CURR,
 	FG_SRAM_CHG_TERM_CURR,
 	FG_SRAM_DELTA_MSOC_THR,
@@ -250,6 +254,8 @@
 	int	esr_tight_lt_flt_upct;
 	int	esr_broad_lt_flt_upct;
 	int	slope_limit_temp;
+	int	esr_pulse_thresh_ma;
+	int	esr_meas_curr_ma;
 	int	jeita_thresholds[NUM_JEITA_LEVELS];
 	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
 	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
@@ -330,6 +336,7 @@
 	struct fg_memif		*sram;
 	struct fg_irq_info	*irqs;
 	struct votable		*awake_votable;
+	struct votable		*delta_bsoc_irq_en_votable;
 	struct fg_sram_param	*sp;
 	struct fg_alg_flag	*alg_flags;
 	int			*debug_mask;
@@ -370,8 +377,8 @@
 	bool			esr_fcc_ctrl_en;
 	bool			soc_reporting_ready;
 	bool			esr_flt_cold_temp_en;
-	bool			bsoc_delta_irq_en;
 	bool			slope_limit_en;
+	bool			use_ima_single_mode;
 	struct completion	soc_update;
 	struct completion	soc_ready;
 	struct delayed_work	profile_load_work;
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 2dc7618..8a949bf 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -48,6 +48,10 @@
 	int rc;
 	u8 intf_ctl = 0;
 
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "access: %d burst: %d\n",
+		access, burst);
+
+	WARN_ON(burst && chip->use_ima_single_mode);
 	intf_ctl = ((access == FG_WRITE) ? IMA_WR_EN_BIT : 0) |
 			(burst ? MEM_ACS_BURST_BIT : 0);
 
@@ -175,6 +179,7 @@
 {
 	int rc;
 	u8 dma_sts;
+	bool error_present;
 
 	rc = fg_read(chip, MEM_IF_DMA_STS(chip), &dma_sts, 1);
 	if (rc < 0) {
@@ -184,14 +189,13 @@
 	}
 	fg_dbg(chip, FG_STATUS, "dma_sts: %x\n", dma_sts);
 
-	if (dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT)) {
-		rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip),
-				DMA_CLEAR_LOG_BIT, DMA_CLEAR_LOG_BIT);
-		if (rc < 0) {
-			pr_err("failed to write addr=0x%04x, rc=%d\n",
-				MEM_IF_DMA_CTL(chip), rc);
-			return rc;
-		}
+	error_present = dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT);
+	rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip), DMA_CLEAR_LOG_BIT,
+			error_present ? DMA_CLEAR_LOG_BIT : 0);
+	if (rc < 0) {
+		pr_err("failed to write addr=0x%04x, rc=%d\n",
+			MEM_IF_DMA_CTL(chip), rc);
+		return rc;
 	}
 
 	return 0;
@@ -293,7 +297,9 @@
 		/* check for error condition */
 		rc = fg_clear_ima_errors_if_any(chip, false);
 		if (rc < 0) {
-			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			if (rc != -EAGAIN)
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
 			return rc;
 		}
 
@@ -357,7 +363,12 @@
 		/* check for error condition */
 		rc = fg_clear_ima_errors_if_any(chip, false);
 		if (rc < 0) {
-			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			if (rc == -EAGAIN)
+				pr_err("IMA error cleared, address [%d %d] len %d\n",
+					address, offset, len);
+			else
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
 			return rc;
 		}
 
@@ -365,6 +376,15 @@
 		len -= num_bytes;
 		offset = byte_enable = 0;
 
+		if (chip->use_ima_single_mode && len) {
+			address++;
+			rc = fg_set_address(chip, address);
+			if (rc < 0) {
+				pr_err("failed to set address rc = %d\n", rc);
+				return rc;
+			}
+		}
+
 		rc = fg_check_iacs_ready(chip);
 		if (rc < 0) {
 			pr_debug("IACS_RDY failed rc=%d\n", rc);
@@ -403,22 +423,40 @@
 		/* check for error condition */
 		rc = fg_clear_ima_errors_if_any(chip, false);
 		if (rc < 0) {
-			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			if (rc == -EAGAIN)
+				pr_err("IMA error cleared, address [%d %d] len %d\n",
+					address, offset, len);
+			else
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
 			return rc;
 		}
 
-		if (len && len < BYTES_PER_SRAM_WORD) {
-			/*
-			 * Move to single mode. Changing address is not
-			 * required here as it must be in burst mode. Address
-			 * will get incremented internally by FG HW once the MSB
-			 * of RD_DATA is read.
-			 */
-			rc = fg_config_access_mode(chip, FG_READ, 0);
-			if (rc < 0) {
-				pr_err("failed to move to single mode rc=%d\n",
-					rc);
-				return -EIO;
+		if (chip->use_ima_single_mode) {
+			if (len) {
+				address++;
+				rc = fg_set_address(chip, address);
+				if (rc < 0) {
+					pr_err("failed to set address rc = %d\n",
+						rc);
+					return rc;
+				}
+			}
+		} else {
+			if (len && len < BYTES_PER_SRAM_WORD) {
+				/*
+				 * Move to single mode. Changing address is not
+				 * required here as it must be in burst mode.
+				 * Address will get incremented internally by FG
+				 * HW once the MSB of RD_DATA is read.
+				 */
+				rc = fg_config_access_mode(chip, FG_READ,
+								false);
+				if (rc < 0) {
+					pr_err("failed to move to single mode rc=%d\n",
+						rc);
+					return -EIO;
+				}
 			}
 		}
 
@@ -489,6 +527,7 @@
 		u16 address, int offset, int len, bool access)
 {
 	int rc = 0;
+	bool burst_mode = false;
 
 	if (!is_mem_access_available(chip, access))
 		return -EBUSY;
@@ -503,7 +542,8 @@
 	}
 
 	/* configure for the read/write, single/burst mode */
-	rc = fg_config_access_mode(chip, access, (offset + len) > 4);
+	burst_mode = chip->use_ima_single_mode ? false : ((offset + len) > 4);
+	rc = fg_config_access_mode(chip, access, burst_mode);
 	if (rc < 0) {
 		pr_err("failed to set memory access rc = %d\n", rc);
 		return rc;
@@ -583,7 +623,7 @@
 	if (rc < 0) {
 		count++;
 		if (rc == -EAGAIN) {
-			pr_err("IMA access failed retry_count = %d\n", count);
+			pr_err("IMA read failed retry_count = %d\n", count);
 			goto retry;
 		}
 		pr_err("failed to read SRAM address rc = %d\n", rc);
@@ -667,8 +707,8 @@
 	rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
 	if (rc < 0) {
 		count++;
-		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
-			pr_err("IMA access failed retry_count = %d\n", count);
+		if (rc == -EAGAIN) {
+			pr_err("IMA write failed retry_count = %d\n", count);
 			goto retry;
 		}
 		pr_err("failed to write SRAM address rc = %d\n", rc);
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
index bf2827f..cd0b2fb 100644
--- a/drivers/power/supply/qcom/fg-reg.h
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -167,6 +167,7 @@
 
 /* BATT_INFO_ESR_PULL_DN_CFG */
 #define ESR_PULL_DOWN_IVAL_MASK			GENMASK(3, 2)
+#define ESR_PULL_DOWN_IVAL_SHIFT		2
 #define ESR_MEAS_CUR_60MA			0x0
 #define ESR_MEAS_CUR_120MA			0x1
 #define ESR_MEAS_CUR_180MA			0x2
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index 39a0dcb6..10a1c54 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -18,9 +18,9 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
-#define NUM_MAX_CLIENTS	8
+#define NUM_MAX_CLIENTS		16
 #define DEBUG_FORCE_CLIENT	"DEBUG_FORCE_CLIENT"
 
 static DEFINE_SPINLOCK(votable_list_slock);
@@ -188,6 +188,38 @@
 }
 
 /**
+ * is_client_vote_enabled() -
+ * is_client_vote_enabled_locked() -
+ *		The unlocked and locked variants of getting whether a client's
+ *		vote is enabled.
+ * @votable:	the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ *	True if the client's vote is enabled; false otherwise.
+ */
+bool is_client_vote_enabled_locked(struct votable *votable,
+							const char *client_str)
+{
+	int client_id = get_client_id(votable, client_str);
+
+	if (client_id < 0)
+		return false;
+
+	return votable->votes[client_id].enabled;
+}
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str)
+{
+	bool enabled;
+
+	lock_votable(votable);
+	enabled = is_client_vote_enabled_locked(votable, client_str);
+	unlock_votable(votable);
+	return enabled;
+}
+
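
A hypothetical caller sketch for the new helper (not part of this
patch), showing the query-before-vote pattern it enables, using votable
and client names that exist elsewhere in this series:

  /* illustrative only: avoid a redundant vote if one is already held */
  if (!is_client_vote_enabled(chip->awake_votable, SRAM_READ))
  	vote(chip->awake_votable, SRAM_READ, true, 0);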
+/**
  * get_client_vote() -
  * get_client_vote_locked() -
  *		The unlocked and locked variants of getting a client's voted
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 304d0cf..7ab5b31 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -31,6 +31,8 @@
 #define FG_MEM_INFO_PMI8998		0x0D
 
 /* SRAM address and offset in ascending order */
+#define ESR_PULSE_THRESH_WORD		2
+#define ESR_PULSE_THRESH_OFFSET		3
 #define SLOPE_LIMIT_WORD		3
 #define SLOPE_LIMIT_OFFSET		0
 #define CUTOFF_VOLT_WORD		5
@@ -216,6 +218,8 @@
 		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
 	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
 		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+		1, 100000, 390625, 0, fg_encode_default, NULL),
 	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
 		KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
 		fg_encode_default, NULL),
@@ -286,6 +290,8 @@
 		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
 	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
 		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+		1, 100000, 390625, 0, fg_encode_default, NULL),
 	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
 		KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
 		fg_encode_default, NULL),
@@ -525,7 +531,7 @@
 }
 
 #define CC_SOC_30BIT	GENMASK(29, 0)
-static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+static int fg_get_charge_raw(struct fg_chip *chip, int *val)
 {
 	int rc, cc_soc;
 
@@ -539,7 +545,7 @@
 	return 0;
 }
 
-static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+static int fg_get_charge_counter(struct fg_chip *chip, int *val)
 {
 	int rc, cc_soc;
 
@@ -981,6 +987,29 @@
 	};
 }
 
+static inline void get_esr_meas_current(int curr_ma, u8 *val)
+{
+	switch (curr_ma) {
+	case 60:
+		*val = ESR_MEAS_CUR_60MA;
+		break;
+	case 120:
+		*val = ESR_MEAS_CUR_120MA;
+		break;
+	case 180:
+		*val = ESR_MEAS_CUR_180MA;
+		break;
+	case 240:
+		*val = ESR_MEAS_CUR_240MA;
+		break;
+	default:
+		*val = ESR_MEAS_CUR_120MA;
+		break;
+	}
+
+	*val <<= ESR_PULL_DOWN_IVAL_SHIFT;
+}
+
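
The helper above returns the register code already shifted into the
ESR_PULL_DOWN_IVAL_MASK field, so callers can hand it straight to a
masked write. A standalone check of that encoding, with the mask and
shift values copied from fg-reg.h in this patch:

  #include <stdio.h>
  #include <stdint.h>

  #define ESR_PULL_DOWN_IVAL_MASK		0x0C	/* GENMASK(3, 2) */
  #define ESR_PULL_DOWN_IVAL_SHIFT	2
  #define ESR_MEAS_CUR_180MA		0x2

  int main(void)
  {
  	uint8_t val = ESR_MEAS_CUR_180MA << ESR_PULL_DOWN_IVAL_SHIFT;

  	printf("masked-write value 0x%02x, fits mask 0x%02x: %s\n",
  	       val, ESR_PULL_DOWN_IVAL_MASK,
  	       (val & ~ESR_PULL_DOWN_IVAL_MASK) ? "no" : "yes");
  	return 0;
  }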
 static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
 				int flags)
 {
@@ -1054,6 +1083,25 @@
 	fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
 }
 
+static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
+					int enable, const char *client)
+{
+	struct fg_chip *chip = data;
+
+	if (!chip->irqs[BSOC_DELTA_IRQ].irq)
+		return 0;
+
+	if (enable) {
+		enable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+		enable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+	} else {
+		disable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+		disable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+	}
+
+	return 0;
+}
+
 static int fg_awake_cb(struct votable *votable, void *data, int awake,
 			const char *client)
 {
@@ -1241,7 +1289,7 @@
 		chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
 }
 
-static int  fg_cap_learning_process_full_data(struct fg_chip *chip)
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
 {
 	int rc, cc_soc_sw, cc_soc_delta_pct;
 	int64_t delta_cc_uah;
@@ -1263,30 +1311,39 @@
 	return 0;
 }
 
-static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+#define BATT_SOC_32BIT	GENMASK(31, 0)
+static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
 {
-	int rc, cc_soc_sw;
+	int rc, cc_soc_sw, batt_soc_msb;
 
-	if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+	batt_soc_msb = batt_soc >> 24;
+	if (DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW) >
 		chip->dt.cl_start_soc) {
 		fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
-			batt_soc);
+			batt_soc_msb);
 		return -EINVAL;
 	}
 
-	chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+	chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc_msb,
 					FULL_SOC_RAW);
-	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+
+	/* Prime cc_soc_sw with battery SOC when capacity learning begins */
+	cc_soc_sw = div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
+				BATT_SOC_32BIT);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+		chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+		chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
 	if (rc < 0) {
-		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
-		return rc;
+		pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+		goto out;
 	}
 
 	chip->cl.init_cc_soc_sw = cc_soc_sw;
 	chip->cl.active = true;
 	fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
-		batt_soc, chip->cl.init_cc_soc_sw);
-	return 0;
+		batt_soc_msb, chip->cl.init_cc_soc_sw);
+out:
+	return rc;
 }
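
Priming cc_soc_sw as batt_soc * CC_SOC_30BIT / BATT_SOC_32BIT is a plain
rescale from 32-bit to 30-bit full scale, i.e. roughly batt_soc / 4. A
minimal user-space sketch; the constants mirror the GENMASK() values and
the ~25% input is an arbitrary example:

  #include <stdio.h>
  #include <stdint.h>

  #define CC_SOC_30BIT	0x3FFFFFFFLL	/* GENMASK(29, 0) */
  #define BATT_SOC_32BIT	0xFFFFFFFFLL	/* GENMASK(31, 0) */

  int main(void)
  {
  	uint32_t batt_soc = 0x40000000;	/* ~25% of 32-bit full scale */
  	int64_t cc_soc_sw = ((int64_t)batt_soc * CC_SOC_30BIT) /
  				BATT_SOC_32BIT;
  	int batt_soc_msb = batt_soc >> 24;	/* MSB vs FULL_SOC_RAW */

  	printf("cc_soc_sw=0x%llx msb=%d (~%d%%)\n",
  	       (long long)cc_soc_sw, batt_soc_msb,
  	       batt_soc_msb * 100 / 255);
  	return 0;
  }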
 
 static int fg_cap_learning_done(struct fg_chip *chip)
@@ -1318,7 +1375,7 @@
 #define FULL_SOC_RAW	255
 static void fg_cap_learning_update(struct fg_chip *chip)
 {
-	int rc, batt_soc;
+	int rc, batt_soc, batt_soc_msb;
 
 	mutex_lock(&chip->cl.lock);
 
@@ -1337,11 +1394,9 @@
 		goto out;
 	}
 
-	/* We need only the most significant byte here */
-	batt_soc = (u32)batt_soc >> 24;
-
+	batt_soc_msb = (u32)batt_soc >> 24;
 	fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
-		chip->charge_status, chip->cl.active, batt_soc);
+		chip->charge_status, chip->cl.active, batt_soc_msb);
 
 	/* Initialize the starting point of learning capacity */
 	if (!chip->cl.active) {
@@ -1363,7 +1418,7 @@
 
 		if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
 			fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
-				batt_soc);
+				batt_soc_msb);
 			chip->cl.active = false;
 			chip->cl.init_cc_uah = 0;
 		}
@@ -1470,16 +1525,8 @@
 		return 0;
 
 	mutex_lock(&chip->charge_full_lock);
-	if (!chip->charge_done && chip->bsoc_delta_irq_en) {
-		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
-		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
-		chip->bsoc_delta_irq_en = false;
-	} else if (chip->charge_done && !chip->bsoc_delta_irq_en) {
-		enable_irq(fg_irqs[BSOC_DELTA_IRQ].irq);
-		enable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
-		chip->bsoc_delta_irq_en = true;
-	}
-
+	vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER,
+		chip->charge_done, 0);
 	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
 		&prop);
 	if (rc < 0) {
@@ -1598,6 +1645,9 @@
 	u64 scaling_factor;
 	u32 val = 0;
 
+	if (!chip->dt.rconn_mohms)
+		return 0;
+
 	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
 			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
 	if (rc < 0) {
@@ -1696,6 +1746,9 @@
 	if (!chip->dt.auto_recharge_soc)
 		return 0;
 
+	if (recharge_soc < 0 || recharge_soc > FULL_CAPACITY)
+		return 0;
+
 	fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
 	rc = fg_sram_write(chip,
 			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
@@ -1712,46 +1765,55 @@
 static int fg_adjust_recharge_soc(struct fg_chip *chip)
 {
 	int rc, msoc, recharge_soc, new_recharge_soc = 0;
+	bool recharge_soc_status;
 
 	if (!chip->dt.auto_recharge_soc)
 		return 0;
 
 	recharge_soc = chip->dt.recharge_soc_thr;
+	recharge_soc_status = chip->recharge_soc_adjusted;
 	/*
 	 * If the input is present and charging had been terminated, adjust
 	 * the recharge SOC threshold based on the monotonic SOC at which
 	 * the charge termination had happened.
 	 */
-	if (is_input_present(chip) && !chip->recharge_soc_adjusted
-		&& chip->charge_done) {
-		/* Get raw monotonic SOC for calculation */
-		rc = fg_get_msoc(chip, &msoc);
-		if (rc < 0) {
-			pr_err("Error in getting msoc, rc=%d\n", rc);
-			return rc;
-		}
+	if (is_input_present(chip)) {
+		if (chip->charge_done) {
+			if (!chip->recharge_soc_adjusted) {
+				/* Get raw monotonic SOC for calculation */
+				rc = fg_get_msoc(chip, &msoc);
+				if (rc < 0) {
+					pr_err("Error in getting msoc, rc=%d\n",
+						rc);
+					return rc;
+				}
 
-		/* Adjust the recharge_soc threshold */
-		new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
-	} else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
-				|| chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+				/* Adjust the recharge_soc threshold */
+				new_recharge_soc = msoc - (FULL_CAPACITY -
+								recharge_soc);
+				chip->recharge_soc_adjusted = true;
+			} else {
+				/* adjusted already, do nothing */
+				return 0;
+			}
+		} else {
+			/* Charging, do nothing */
+			return 0;
+		}
+	} else {
 		/* Restore the default value */
 		new_recharge_soc = recharge_soc;
+		chip->recharge_soc_adjusted = false;
 	}
 
-	if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
-		rc = fg_set_recharge_soc(chip, new_recharge_soc);
-		if (rc) {
-			pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
-			return rc;
-		}
-
-		chip->recharge_soc_adjusted = (new_recharge_soc !=
-						recharge_soc);
-		fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
-			new_recharge_soc);
+	rc = fg_set_recharge_soc(chip, new_recharge_soc);
+	if (rc < 0) {
+		chip->recharge_soc_adjusted = recharge_soc_status;
+		pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+		return rc;
 	}
 
+	fg_dbg(chip, FG_STATUS, "resume soc set to %d\n", new_recharge_soc);
 	return 0;
 }
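
The adjustment above preserves the headroom that existed below full at
the moment charging terminated: new_recharge_soc = msoc -
(FULL_CAPACITY - recharge_soc). Worked standalone example with assumed
values:

  #include <stdio.h>

  #define FULL_CAPACITY	100

  int main(void)
  {
  	int recharge_soc = 95;	/* configured threshold, example value */
  	int msoc = 97;		/* monotonic SOC at charge termination */
  	int new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);

  	printf("adjusted threshold: %d%%\n", new_recharge_soc);	/* 92 */
  	return 0;
  }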
 
@@ -2156,6 +2218,35 @@
 	return count;
 }
 
+static int fg_bp_params_config(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 buf;
+
+	/* This SRAM register is only present in v2.0 and above */
+	if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
+					chip->bp.float_volt_uv > 0) {
+		fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+			chip->bp.float_volt_uv / 1000, &buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+			chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, &buf,
+			chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing float_volt, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->bp.vbatt_full_mv > 0) {
+		rc = fg_set_constant_chg_voltage(chip,
+				chip->bp.vbatt_full_mv * 1000);
+		if (rc < 0)
+			return rc;
+	}
+
+	return rc;
+}
+
 #define PROFILE_LOAD_BIT	BIT(0)
 #define BOOTLOADER_LOAD_BIT	BIT(1)
 #define BOOTLOADER_RESTART_BIT	BIT(2)
@@ -2176,6 +2267,17 @@
 	/* Check if integrity bit is set */
 	if (val & PROFILE_LOAD_BIT) {
 		fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+
+		/* Whitelist the values */
+		val &= ~PROFILE_LOAD_BIT;
+		if (val != HLOS_RESTART_BIT && val != BOOTLOADER_LOAD_BIT &&
+			val != (BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT)) {
+			val |= PROFILE_LOAD_BIT;
+			pr_warn("Garbage value in profile integrity word: 0x%x\n",
+				val);
+			return true;
+		}
+
 		rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
 				buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
 		if (rc < 0) {
@@ -2323,6 +2425,11 @@
 	}
 
 done:
+	rc = fg_bp_params_config(chip);
+	if (rc < 0)
+		pr_err("Error in configuring battery profile params, rc:%d\n",
+			rc);
+
 	rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
 			FG_IMA_DEFAULT);
 	if (rc < 0) {
@@ -2806,7 +2913,7 @@
 		pval->intval = chip->cyc_ctr.id;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
-		rc = fg_get_cc_soc(chip, &pval->intval);
+		rc = fg_get_charge_raw(chip, &pval->intval);
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_NOW:
 		pval->intval = chip->cl.init_cc_uah;
@@ -2815,7 +2922,7 @@
 		pval->intval = chip->cl.learned_cc_uah;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
-		rc = fg_get_cc_soc_sw(chip, &pval->intval);
+		rc = fg_get_charge_counter(chip, &pval->intval);
 		break;
 	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
 		rc = fg_get_time_to_full(chip, &pval->intval);
@@ -2974,27 +3081,6 @@
 		return rc;
 	}
 
-	/* This SRAM register is only present in v2.0 and above */
-	if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
-					chip->bp.float_volt_uv > 0) {
-		fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
-			chip->bp.float_volt_uv / 1000, buf);
-		rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
-			chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
-			chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing float_volt, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	if (chip->bp.vbatt_full_mv > 0) {
-		rc = fg_set_constant_chg_voltage(chip,
-				chip->bp.vbatt_full_mv * 1000);
-		if (rc < 0)
-			return rc;
-	}
-
 	fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
 		buf);
 	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
@@ -3164,12 +3250,10 @@
 		return rc;
 	}
 
-	if (chip->dt.rconn_mohms > 0) {
-		rc = fg_rconn_config(chip);
-		if (rc < 0) {
-			pr_err("Error in configuring Rconn, rc=%d\n", rc);
-			return rc;
-		}
+	rc = fg_rconn_config(chip);
+	if (rc < 0) {
+		pr_err("Error in configuring Rconn, rc=%d\n", rc);
+		return rc;
 	}
 
 	fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
@@ -3192,6 +3276,24 @@
 		return rc;
 	}
 
+	fg_encode(chip->sp, FG_SRAM_ESR_PULSE_THRESH,
+		chip->dt.esr_pulse_thresh_ma, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_word,
+			chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_PULSE_THRESH].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing esr_pulse_thresh_ma, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_esr_meas_current(chip->dt.esr_meas_curr_ma, &val);
+	rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip),
+			ESR_PULL_DOWN_IVAL_MASK, val);
+	if (rc < 0) {
+		pr_err("Error in writing esr_meas_curr_ma, rc=%d\n", rc);
+		return rc;
+	}
+
 	return 0;
 }
 
@@ -3216,20 +3318,19 @@
 	}
 
 	fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
-	if (status & MEM_XCP_BIT) {
-		rc = fg_clear_dma_errors_if_any(chip);
-		if (rc < 0) {
-			pr_err("Error in clearing DMA error, rc=%d\n", rc);
-			return IRQ_HANDLED;
-		}
 
-		mutex_lock(&chip->sram_rw_lock);
+	mutex_lock(&chip->sram_rw_lock);
+	rc = fg_clear_dma_errors_if_any(chip);
+	if (rc < 0)
+		pr_err("Error in clearing DMA error, rc=%d\n", rc);
+
+	if (status & MEM_XCP_BIT) {
 		rc = fg_clear_ima_errors_if_any(chip, true);
 		if (rc < 0 && rc != -EAGAIN)
 			pr_err("Error in checking IMA errors rc:%d\n", rc);
-		mutex_unlock(&chip->sram_rw_lock);
 	}
 
+	mutex_unlock(&chip->sram_rw_lock);
 	return IRQ_HANDLED;
 }
 
@@ -3676,6 +3777,8 @@
 #define DEFAULT_ESR_TIGHT_LT_FLT_UPCT	48829
 #define DEFAULT_ESR_BROAD_LT_FLT_UPCT	148438
 #define DEFAULT_ESR_CLAMP_MOHMS		20
+#define DEFAULT_ESR_PULSE_THRESH_MA	110
+#define DEFAULT_ESR_MEAS_CURR_MA	120
 static int fg_parse_dt(struct fg_chip *chip)
 {
 	struct device_node *child, *revid_node, *node = chip->dev->of_node;
@@ -3725,6 +3828,7 @@
 	case PM660_SUBTYPE:
 		chip->sp = pmi8998_v2_sram_params;
 		chip->alg_flags = pmi8998_v2_alg_flags;
+		chip->use_ima_single_mode = true;
 		break;
 	default:
 		return -EINVAL;
@@ -3945,9 +4049,7 @@
 		pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
 
 	rc = of_property_read_u32(node, "qcom,fg-rconn-mohms", &temp);
-	if (rc < 0)
-		chip->dt.rconn_mohms = -EINVAL;
-	else
+	if (!rc)
 		chip->dt.rconn_mohms = temp;
 
 	rc = of_property_read_u32(node, "qcom,fg-esr-filter-switch-temp",
@@ -3995,6 +4097,22 @@
 	else
 		chip->dt.esr_clamp_mohms = temp;
 
+	chip->dt.esr_pulse_thresh_ma = DEFAULT_ESR_PULSE_THRESH_MA;
+	rc = of_property_read_u32(node, "qcom,fg-esr-pulse-thresh-ma", &temp);
+	if (!rc) {
+		/* ESR pulse qualification threshold range is 1-997 mA */
+		if (temp > 0 && temp < 997)
+			chip->dt.esr_pulse_thresh_ma = temp;
+	}
+
+	chip->dt.esr_meas_curr_ma = DEFAULT_ESR_MEAS_CURR_MA;
+	rc = of_property_read_u32(node, "qcom,fg-esr-meas-curr-ma", &temp);
+	if (!rc) {
+		/* ESR measurement current range is 60-240 mA */
+		if (temp >= 60 && temp <= 240)
+			chip->dt.esr_meas_curr_ma = temp;
+	}
+
 	return 0;
 }
 
@@ -4005,6 +4123,9 @@
 	if (chip->awake_votable)
 		destroy_votable(chip->awake_votable);
 
+	if (chip->delta_bsoc_irq_en_votable)
+		destroy_votable(chip->delta_bsoc_irq_en_votable);
+
 	if (chip->batt_id_chan)
 		iio_channel_release(chip->batt_id_chan);
 
@@ -4046,7 +4167,15 @@
 					chip);
 	if (IS_ERR(chip->awake_votable)) {
 		rc = PTR_ERR(chip->awake_votable);
-		return rc;
+		goto exit;
+	}
+
+	chip->delta_bsoc_irq_en_votable = create_votable("FG_DELTA_BSOC_IRQ",
+						VOTE_SET_ANY,
+						fg_delta_bsoc_irq_en_cb, chip);
+	if (IS_ERR(chip->delta_bsoc_irq_en_votable)) {
+		rc = PTR_ERR(chip->delta_bsoc_irq_en_votable);
+		goto exit;
 	}
 
 	rc = fg_parse_dt(chip);
@@ -4073,7 +4202,7 @@
 	rc = fg_get_batt_id(chip);
 	if (rc < 0) {
 		pr_err("Error in getting battery id, rc:%d\n", rc);
-		return rc;
+		goto exit;
 	}
 
 	rc = fg_get_batt_profile(chip);
@@ -4131,11 +4260,7 @@
 		disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
 
 	/* Keep BSOC_DELTA_IRQ irq disabled until we require it */
-	if (fg_irqs[BSOC_DELTA_IRQ].irq) {
-		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
-		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
-		chip->bsoc_delta_irq_en = false;
-	}
+	rerun_election(chip->delta_bsoc_irq_en_votable);
 
 	rc = fg_debugfs_create(chip);
 	if (rc < 0) {
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index cbfab30..c74dc89 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -19,7 +19,7 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/qpnp/qpnp-revid.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define QNOVO_REVISION1		0x00
 #define QNOVO_REVISION2		0x01
@@ -29,6 +29,8 @@
 #define QNOVO_PTRAIN_STS	0x08
 #define QNOVO_ERROR_STS		0x09
 #define QNOVO_ERROR_BIT		BIT(0)
+#define QNOVO_ERROR_STS2	0x0A
+#define QNOVO_ERROR_CHARGING_DISABLED	BIT(1)
 #define QNOVO_INT_RT_STS	0x10
 #define QNOVO_INT_SET_TYPE	0x11
 #define QNOVO_INT_POLARITY_HIGH	0x12
@@ -109,20 +111,6 @@
 	struct device_node	*revid_dev_node;
 };
 
-enum {
-	QNOVO_NO_ERR_STS_BIT		= BIT(0),
-};
-
-struct chg_props {
-	bool		charging;
-	bool		usb_online;
-	bool		dc_online;
-};
-
-struct chg_status {
-	bool		ok_to_qnovo;
-};
-
 struct qnovo {
 	int			base;
 	struct mutex		write_lock;
@@ -141,13 +129,10 @@
 	s64			v_gain_mega;
 	struct notifier_block	nb;
 	struct power_supply	*batt_psy;
-	struct power_supply	*usb_psy;
-	struct power_supply	*dc_psy;
-	struct chg_props	cp;
-	struct chg_status	cs;
 	struct work_struct	status_change_work;
 	int			fv_uV_request;
 	int			fcc_uA_request;
+	bool			ok_to_qnovo;
 };
 
 static int debug_mask;
@@ -272,28 +257,22 @@
 					const char *client)
 {
 	struct qnovo *chip = data;
-	int rc = 0;
+	union power_supply_propval pval = {0};
+	int rc;
 
-	if (disable) {
-		rc = qnovo_batt_psy_update(chip, true);
-		if (rc < 0)
-			return rc;
-	}
+	if (!is_batt_available(chip))
+		return -EINVAL;
 
-	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
-				 disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+	pval.intval = !disable;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+			&pval);
 	if (rc < 0) {
-		dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
-			disable ? "disable" : "enable", rc);
-		return rc;
+		pr_err("Couldn't set prop qnovo_enable rc = %d\n", rc);
+		return -EINVAL;
 	}
 
-	if (!disable) {
-		rc = qnovo_batt_psy_update(chip, false);
-		if (rc < 0)
-			return rc;
-	}
-
+	rc = qnovo_batt_psy_update(chip, disable);
 	return rc;
 }
 
@@ -325,36 +304,18 @@
 	return 0;
 }
 
-static int qnovo_check_chg_version(struct qnovo *chip)
-{
-	int rc;
-
-	chip->pmic_rev_id = get_revid_data(chip->dt.revid_dev_node);
-	if (IS_ERR(chip->pmic_rev_id)) {
-		rc = PTR_ERR(chip->pmic_rev_id);
-		if (rc != -EPROBE_DEFER)
-			pr_err("Unable to get pmic_revid rc=%d\n", rc);
-		return rc;
-	}
-
-	if ((chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE)
-		   && (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4)) {
-		chip->wa_flags |= QNOVO_NO_ERR_STS_BIT;
-	}
-
-	return 0;
-}
-
 enum {
 	VER = 0,
 	OK_TO_QNOVO,
-	ENABLE,
+	QNOVO_ENABLE,
+	PT_ENABLE,
 	FV_REQUEST,
 	FCC_REQUEST,
 	PE_CTRL_REG,
 	PE_CTRL2_REG,
 	PTRAIN_STS_REG,
 	INT_RT_STS_REG,
+	ERR_STS2_REG,
 	PREST1,
 	PPULS1,
 	NREST1,
@@ -394,6 +355,12 @@
 };
 
 static struct param_info params[] = {
+	[PT_ENABLE] = {
+		.name			= "PT_ENABLE",
+		.start_addr		= QNOVO_PTRAIN_EN,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
 	[FV_REQUEST] = {
 		.units_str		= "uV",
 	},
@@ -424,6 +391,12 @@
 		.num_regs		= 1,
 		.units_str		= "",
 	},
+	[ERR_STS2_REG] = {
+		.name			= "RAW_CHGR_ERR",
+		.start_addr		= QNOVO_ERROR_STS2,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
 	[PREST1] = {
 		.name			= "PREST1",
 		.start_addr		= QNOVO_PREST1_CTRL,
@@ -431,7 +404,7 @@
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
 		.min_val		= 5,
-		.max_val		= 1275,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[PPULS1] = {
@@ -440,8 +413,8 @@
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 1600, /* converts to uC */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 104856000,
+		.min_val		= 30000,
+		.max_val		= 65535000,
 		.units_str		= "uC",
 	},
 	[NREST1] = {
@@ -451,7 +424,7 @@
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
 		.min_val		= 5,
-		.max_val		= 1275,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[NPULS1] = {
@@ -460,8 +433,8 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 5,
-		.max_val		= 1275,
+		.min_val		= 0,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[PPCNT] = {
@@ -470,7 +443,7 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 1,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
+		.min_val		= 1,
 		.max_val		= 255,
 		.units_str		= "pulses",
 	},
@@ -480,8 +453,8 @@
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 610350, /* converts to nV */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 5000000,
+		.min_val		= 2200000,
+		.max_val		= 4500000,
 		.units_str		= "uV",
 	},
 	[PVOLT1] = {
@@ -506,8 +479,6 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 2,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 5,
-		.max_val		= 1275,
 		.units_str		= "S",
 	},
 	[PREST2] = {
@@ -517,7 +488,7 @@
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
 		.min_val		= 5,
-		.max_val		= 327675,
+		.max_val		= 65535,
 		.units_str		= "mS",
 	},
 	[PPULS2] = {
@@ -526,8 +497,8 @@
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 1600, /* converts to uC */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 104856000,
+		.min_val		= 30000,
+		.max_val		= 65535000,
 		.units_str		= "uC",
 	},
 	[NREST2] = {
@@ -538,7 +509,7 @@
 		.reg_to_unit_divider	= 1,
 		.reg_to_unit_offset	= -5,
 		.min_val		= 5,
-		.max_val		= 1280,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[NPULS2] = {
@@ -547,18 +518,18 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 5,
-		.max_val		= 1275,
+		.min_val		= 0,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[VLIM2] = {
-		.name			= "VLIM1",
+		.name			= "VLIM2",
 		.start_addr		= QNOVO_VLIM2_LSB_CTRL,
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 610350, /* converts to nV */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 5000000,
+		.min_val		= 2200000,
+		.max_val		= 4500000,
 		.units_str		= "uV",
 	},
 	[PVOLT2] = {
@@ -591,6 +562,8 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 1,
 		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 255,
 		.units_str		= "pulses",
 	},
 	[VMAX] = {
@@ -645,33 +618,73 @@
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", chip->cs.ok_to_qnovo);
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
 }
 
-static ssize_t enable_show(struct class *c, struct class_attribute *attr,
+static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
 			char *ubuf)
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
-	int val;
+	int val = get_effective_result(chip->disable_votable);
 
-	val = get_client_vote(chip->disable_votable, USER_VOTER);
-	val = !val;
-	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
 }
 
-static ssize_t enable_store(struct class *c, struct class_attribute *attr,
+static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
 			const char *ubuf, size_t count)
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
 	unsigned long val;
-	bool disable;
 
-	if (kstrtoul(ubuf, 10, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
-	disable = !val;
+	vote(chip->disable_votable, USER_VOTER, !val, 0);
 
-	vote(chip->disable_votable, USER_VOTER, disable, 0);
+	return count;
+}
+
+static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval = buf[1] << 8 | buf[0];
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n",
+				(int)(regval & QNOVO_PTRAIN_EN_BIT));
+}
+
+static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	unsigned long val;
+	int rc = 0;
+
+	if (get_effective_result(chip->disable_votable))
+		return -EINVAL;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+				 (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+			(bool)val ? "enable" : "disable", rc);
+		return rc;
+	}
+
 	return count;
 }
 
@@ -688,7 +701,7 @@
 	if (i == FCC_REQUEST)
 		val = chip->fcc_uA_request;
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
 }
 
 static ssize_t val_store(struct class *c, struct class_attribute *attr,
@@ -698,7 +711,7 @@
 	int i = attr - qnovo_attributes;
 	unsigned long val;
 
-	if (kstrtoul(ubuf, 10, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
 	if (i == FV_REQUEST)
@@ -707,6 +720,9 @@
 	if (i == FCC_REQUEST)
 		chip->fcc_uA_request = val;
 
+	if (!get_effective_result(chip->disable_votable))
+		qnovo_batt_psy_update(chip, false);
+
 	return count;
 }
 
@@ -726,8 +742,7 @@
 	}
 	regval = buf[1] << 8 | buf[0];
 
-	return snprintf(ubuf, PAGE_SIZE, "0x%04x%s\n",
-			regval, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "0x%04x\n", regval);
 }
 
 static ssize_t reg_store(struct class *c, struct class_attribute *attr,
@@ -739,7 +754,7 @@
 	unsigned long val;
 	int rc;
 
-	if (kstrtoul(ubuf, 16, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
 	buf[0] = val & 0xFF;
@@ -774,7 +789,7 @@
 			/ params[i].reg_to_unit_divider)
 		- params[i].reg_to_unit_offset;
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
 }
 
 static ssize_t time_store(struct class *c, struct class_attribute *attr,
@@ -787,7 +802,7 @@
 	unsigned long val;
 	int rc;
 
-	if (kstrtoul(ubuf, 10, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
 	if (val < params[i].min_val || val > params[i].max_val) {
@@ -828,7 +843,11 @@
 		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
 		return -EINVAL;
 	}
-	regval_nA = buf[1] << 8 | buf[0];
+
+	if (buf[1] & BIT(5))
+		buf[1] |= GENMASK(7, 6);
+
+	regval_nA = (s16)(buf[1] << 8 | buf[0]);
 	regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier,
 					params[i].reg_to_unit_divider)
 			- params[i].reg_to_unit_offset;
@@ -841,11 +860,10 @@
 		gain = chip->internal_i_gain_mega;
 	}
 
-	comp_val_nA = div_s64(regval_nA * gain, 1000000) + offset_nA;
+	comp_val_nA = div_s64(regval_nA * gain, 1000000) - offset_nA;
 	comp_val_uA = div_s64(comp_val_nA, 1000);
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-			comp_val_uA, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uA);
 }
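
The two added lines above sign-extend a 14-bit two's-complement current
reading before any arithmetic; without them a negative battery current
would decode as a large positive value. Standalone demonstration, with
the 14-bit layout as implied by the BIT(5)/GENMASK(7, 6) usage:

  #include <stdio.h>
  #include <stdint.h>

  int main(void)
  {
  	uint8_t buf[2] = { 0xFF, 0x3F };	/* 14-bit -1 */

  	if (buf[1] & (1u << 5))		/* BIT(5): sign of 14-bit value */
  		buf[1] |= 0xC0;		/* GENMASK(7, 6): extend sign */

  	printf("regval = %d\n", (int16_t)((buf[1] << 8) | buf[0]));
  	return 0;
  }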
 
 static ssize_t voltage_show(struct class *c, struct class_attribute *attr,
@@ -875,8 +893,7 @@
 	comp_val_nV = div_s64(regval_nV * gain, 1000000) + offset_nV;
 	comp_val_uV = div_s64(comp_val_nV, 1000);
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-				comp_val_uV, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uV);
 }
 
 static ssize_t voltage_store(struct class *c, struct class_attribute *attr,
@@ -890,7 +907,7 @@
 	s64 regval_nV;
 	s64 gain, offset_nV;
 
-	if (kstrtoul(ubuf, 10, &val_uV))
+	if (kstrtoul(ubuf, 0, &val_uV))
 		return -EINVAL;
 
 	if (val_uV < params[i].min_val || val_uV > params[i].max_val) {
@@ -947,8 +964,7 @@
 		gain = chip->internal_i_gain_mega;
 
 	comp_val_uC = div_s64(regval_uC * gain, 1000000);
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-			comp_val_uC, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uC);
 }
 
 static ssize_t coulomb_store(struct class *c, struct class_attribute *attr,
@@ -962,7 +978,7 @@
 	s64 regval;
 	s64 gain;
 
-	if (kstrtoul(ubuf, 10, &val_uC))
+	if (kstrtoul(ubuf, 0, &val_uC))
 		return -EINVAL;
 
 	if (val_uC < params[i].min_val || val_uC > params[i].max_val) {
@@ -1014,15 +1030,14 @@
 		return -EINVAL;
 	}
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-			pval.intval, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", pval.intval);
 }
 
 static struct class_attribute qnovo_attributes[] = {
 	[VER]			= __ATTR_RO(version),
 	[OK_TO_QNOVO]		= __ATTR_RO(ok_to_qnovo),
-	[ENABLE]		= __ATTR(enable, 0644,
-					enable_show, enable_store),
+	[QNOVO_ENABLE]		= __ATTR_RW(qnovo_enable),
+	[PT_ENABLE]		= __ATTR_RW(pt_enable),
 	[FV_REQUEST]		= __ATTR(fv_uV_request, 0644,
 					val_show, val_store),
 	[FCC_REQUEST]		= __ATTR(fcc_uA_request, 0644,
@@ -1031,10 +1046,12 @@
 					reg_show, reg_store),
 	[PE_CTRL2_REG]		= __ATTR(PE_CTRL2_REG, 0644,
 					reg_show, reg_store),
-	[PTRAIN_STS_REG]	= __ATTR(PTRAIN_STS_REG, 0644,
-					reg_show, reg_store),
-	[INT_RT_STS_REG]	= __ATTR(INT_RT_STS_REG, 0644,
-					reg_show, reg_store),
+	[PTRAIN_STS_REG]	= __ATTR(PTRAIN_STS_REG, 0444,
+					reg_show, NULL),
+	[INT_RT_STS_REG]	= __ATTR(INT_RT_STS_REG, 0444,
+					reg_show, NULL),
+	[ERR_STS2_REG]		= __ATTR(ERR_STS2_REG, 0444,
+					reg_show, NULL),
 	[PREST1]		= __ATTR(PREST1_mS, 0644,
 					time_show, time_store),
 	[PPULS1]		= __ATTR(PPULS1_uC, 0644,
@@ -1055,7 +1072,7 @@
 					time_show, NULL),
 	[PREST2]		= __ATTR(PREST2_mS, 0644,
 					time_show, time_store),
-	[PPULS2]		= __ATTR(PPULS2_mS, 0644,
+	[PPULS2]		= __ATTR(PPULS2_uC, 0644,
 					coulomb_show, coulomb_store),
 	[NREST2]		= __ATTR(NREST2_mS, 0644,
 					time_show, time_store),
@@ -1073,8 +1090,8 @@
 					time_show, time_store),
 	[VMAX]			= __ATTR(VMAX_uV, 0444,
 					voltage_show, NULL),
-	[SNUM]			= __ATTR(SNUM, 0644,
-					time_show, time_store),
+	[SNUM]			= __ATTR(SNUM, 0444,
+					time_show, NULL),
 	[VBATT]			= __ATTR(VBATT_uV, 0444,
 					batt_prop_show, NULL),
 	[IBATT]			= __ATTR(IBATT_uA, 0444,
@@ -1086,95 +1103,40 @@
 	__ATTR_NULL,
 };
 
-static void get_chg_props(struct qnovo *chip, struct chg_props *cp)
+static int qnovo_update_status(struct qnovo *chip)
 {
-	union power_supply_propval pval;
 	u8 val = 0;
 	int rc;
+	bool charging;
+	bool changed = false;
 
-	cp->charging = true;
-	rc = qnovo_read(chip, QNOVO_ERROR_STS, &val, 1);
+	rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read error sts rc = %d\n", rc);
-		cp->charging = false;
+		charging = false;
 	} else {
-		cp->charging = (!(val & QNOVO_ERROR_BIT));
+		charging = !(val & QNOVO_ERROR_CHARGING_DISABLED);
 	}
 
-	if (chip->wa_flags & QNOVO_NO_ERR_STS_BIT) {
-		/*
-		 * on v1.0 and v1.1 pmic's force charging to true
-		 * if things are not good to charge s/w gets a PTRAIN_DONE
-		 * interrupt
-		 */
-		cp->charging = true;
+	if (chip->ok_to_qnovo ^ charging) {
+		vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !charging, 0);
+		if (!charging)
+			vote(chip->disable_votable, USER_VOTER, true, 0);
+
+		chip->ok_to_qnovo = charging;
+		changed = true;
 	}
 
-	cp->usb_online = false;
-	if (!chip->usb_psy)
-		chip->usb_psy = power_supply_get_by_name("usb");
-	if (chip->usb_psy) {
-		rc = power_supply_get_property(chip->usb_psy,
-				POWER_SUPPLY_PROP_ONLINE, &pval);
-		if (rc < 0)
-			pr_err("Couldn't read usb online rc = %d\n", rc);
-		else
-			cp->usb_online = (bool)pval.intval;
-	}
-
-	cp->dc_online = false;
-	if (!chip->dc_psy)
-		chip->dc_psy = power_supply_get_by_name("dc");
-	if (chip->dc_psy) {
-		rc = power_supply_get_property(chip->dc_psy,
-				POWER_SUPPLY_PROP_ONLINE, &pval);
-		if (rc < 0)
-			pr_err("Couldn't read dc online rc = %d\n", rc);
-		else
-			cp->dc_online = (bool)pval.intval;
-	}
-}
-
-static void get_chg_status(struct qnovo *chip, const struct chg_props *cp,
-				struct chg_status *cs)
-{
-	cs->ok_to_qnovo = false;
-
-	if (cp->charging &&
-		(cp->usb_online || cp->dc_online))
-		cs->ok_to_qnovo = true;
+	return changed;
 }
 
 static void status_change_work(struct work_struct *work)
 {
 	struct qnovo *chip = container_of(work,
 			struct qnovo, status_change_work);
-	bool notify_uevent = false;
-	struct chg_props cp;
-	struct chg_status cs;
 
-	get_chg_props(chip, &cp);
-	get_chg_status(chip, &cp, &cs);
-
-	if (cs.ok_to_qnovo ^ chip->cs.ok_to_qnovo) {
-		/*
-		 * when it is not okay to Qnovo charge, disable both voters,
-		 * so that when it becomes okay to Qnovo charge the user voter
-		 * has to specifically enable its vote to being Qnovo charging
-		 */
-		if (!cs.ok_to_qnovo) {
-			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 1, 0);
-			vote(chip->disable_votable, USER_VOTER, 1, 0);
-		} else {
-			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 0, 0);
-		}
-		notify_uevent = true;
-	}
-
-	memcpy(&chip->cp, &cp, sizeof(struct chg_props));
-	memcpy(&chip->cs, &cs, sizeof(struct chg_status));
-
-	if (notify_uevent)
+	if (qnovo_update_status(chip))
 		kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
 }
 
@@ -1186,8 +1148,8 @@
 
 	if (ev != PSY_EVENT_PROP_CHANGED)
 		return NOTIFY_OK;
-	if ((strcmp(psy->desc->name, "battery") == 0)
-		|| (strcmp(psy->desc->name, "usb") == 0))
+
+	if (strcmp(psy->desc->name, "battery") == 0)
 		schedule_work(&chip->status_change_work);
 
 	return NOTIFY_OK;
@@ -1197,8 +1159,7 @@
 {
 	struct qnovo *chip = data;
 
-	/* disable user voter here */
-	vote(chip->disable_votable, USER_VOTER, 0, 0);
+	qnovo_update_status(chip);
 	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
 	return IRQ_HANDLED;
 }
@@ -1211,7 +1172,14 @@
 	u8 vadc_offset, vadc_gain;
 	u8 val;
 
-	vote(chip->disable_votable, USER_VOTER, 1, 0);
+	vote(chip->disable_votable, USER_VOTER, true, 0);
+
+	val = 0;
+	rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc bitstream control rc = %d\n", rc);
+		return rc;
+	}
 
 	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_0, &iadc_offset_external, 1);
 	if (rc < 0) {
@@ -1219,12 +1187,28 @@
 		return rc;
 	}
 
+	/* stored as an 8 bit 2's complement signed integer */
+	val = -1 * iadc_offset_external;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
 	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_1, &iadc_offset_internal, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read iadc internal offset rc = %d\n", rc);
 		return rc;
 	}
 
+	/* stored as an 8 bit 2's complement signed integer */
+	val = -1 * iadc_offset_internal;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
 	rc = qnovo_read(chip, QNOVO_IADC_GAIN_0, &iadc_gain_external, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read iadc external gain rc = %d\n", rc);
@@ -1249,53 +1233,20 @@
 		return rc;
 	}
 
-	chip->external_offset_nA = (s64)iadc_offset_external * IADC_LSB_NA;
-	chip->internal_offset_nA = (s64)iadc_offset_internal * IADC_LSB_NA;
-	chip->offset_nV = (s64)vadc_offset * VADC_LSB_NA;
+	chip->external_offset_nA = (s64)(s8)iadc_offset_external * IADC_LSB_NA;
+	chip->internal_offset_nA = (s64)(s8)iadc_offset_internal * IADC_LSB_NA;
+	chip->offset_nV = (s64)(s8)vadc_offset * VADC_LSB_NA;
 	chip->external_i_gain_mega
-		= 1000000000 + (s64)iadc_gain_external * GAIN_LSB_FACTOR;
+		= 1000000000 + (s64)(s8)iadc_gain_external * GAIN_LSB_FACTOR;
 	chip->external_i_gain_mega
 		= div_s64(chip->external_i_gain_mega, 1000);
 	chip->internal_i_gain_mega
-		= 1000000000 + (s64)iadc_gain_internal * GAIN_LSB_FACTOR;
+		= 1000000000 + (s64)(s8)iadc_gain_internal * GAIN_LSB_FACTOR;
 	chip->internal_i_gain_mega
 		= div_s64(chip->internal_i_gain_mega, 1000);
-	chip->v_gain_mega = 1000000000 + (s64)vadc_gain * GAIN_LSB_FACTOR;
+	chip->v_gain_mega = 1000000000 + (s64)(s8)vadc_gain * GAIN_LSB_FACTOR;
 	chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000);
 
-	val = 0;
-	rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't write iadc bitsteam control rc = %d\n", rc);
-		return rc;
-	}
-
-	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't read iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
-	val *= -1;
-	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't write iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
-	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't read iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
-	val *= -1;
-	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't write iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
 	return 0;
 }
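
Each (s8) cast added above forces the raw calibration byte to be read as
two's complement before widening to s64. Standalone illustration of the
difference; IADC_LSB_NA is an assumed value for illustration only:

  #include <stdio.h>
  #include <stdint.h>

  #define IADC_LSB_NA	15625LL	/* assumed, not the driver's value */

  int main(void)
  {
  	uint8_t raw = 0xFF;	/* register reads back as unsigned */
  	int64_t wrong = (int64_t)raw * IADC_LSB_NA;	/* +255 LSBs */
  	int64_t right = (int64_t)(int8_t)raw * IADC_LSB_NA;	/* -1 LSB */

  	printf("wrong=%lld nA right=%lld nA\n",
  	       (long long)wrong, (long long)right);
  	return 0;
  }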
 
@@ -1333,6 +1284,9 @@
 					irq_ptrain_done, rc);
 		return rc;
 	}
+
+	enable_irq_wake(irq_ptrain_done);
+
 	return rc;
 }
 
@@ -1362,13 +1316,6 @@
 		return rc;
 	}
 
-	rc = qnovo_check_chg_version(chip);
-	if (rc < 0) {
-		if (rc != -EPROBE_DEFER)
-			pr_err("Couldn't check version rc=%d\n", rc);
-		return rc;
-	}
-
 	/* set driver data before resources request it */
 	platform_set_drvdata(pdev, chip);
 
@@ -1414,6 +1361,8 @@
 		goto unreg_notifier;
 	}
 
+	device_init_wakeup(chip->dev, true);
+
 	return rc;
 
 unreg_notifier:
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index dab7888..8fd45f18 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -25,7 +26,7 @@
 #include "smb-reg.h"
 #include "smb-lib.h"
 #include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define SMB2_DEFAULT_WPWR_UW	8000000
 
@@ -239,7 +240,6 @@
 struct smb_dt_props {
 	int	fcc_ua;
 	int	usb_icl_ua;
-	int	otg_cl_ua;
 	int	dc_icl_ua;
 	int	boost_threshold_ua;
 	int	fv_uv;
@@ -323,9 +323,9 @@
 		chip->dt.usb_icl_ua = -EINVAL;
 
 	rc = of_property_read_u32(node,
-				"qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+				"qcom,otg-cl-ua", &chg->otg_cl_ua);
 	if (rc < 0)
-		chip->dt.otg_cl_ua = MICRO_1P5A;
+		chg->otg_cl_ua = MICRO_1P5A;
 
 	rc = of_property_read_u32(node,
 				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
@@ -414,6 +414,7 @@
 	POWER_SUPPLY_PROP_BOOST_CURRENT,
 	POWER_SUPPLY_PROP_PE_START,
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 };
 
 static int smb2_usb_get_prop(struct power_supply *psy,
@@ -502,6 +503,9 @@
 	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
 		val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER);
 		break;
+	case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
+		rc = smblib_get_charge_current(chg, &val->intval);
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -610,12 +614,12 @@
 
 static enum power_supply_property smb2_usb_main_props[] = {
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
-	POWER_SUPPLY_PROP_ICL_REDUCTION,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_TYPE,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
 	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
 	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
 	/*
 	 * TODO move the TEMP and TEMP_MAX properties here,
 	 * and update the thermal balancer to look here
@@ -634,9 +638,6 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
 		break;
-	case POWER_SUPPLY_PROP_ICL_REDUCTION:
-		val->intval = chg->icl_reduction_ua;
-		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		rc = smblib_get_charge_param(chg, &chg->param.fcc,
 							&val->intval);
@@ -653,6 +654,9 @@
 	case POWER_SUPPLY_PROP_FCC_DELTA:
 		rc = smblib_get_prop_fcc_delta(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = get_effective_result(chg->usb_icl_votable);
+		break;
 	default:
 		pr_debug("get prop %d is not supported in usb-main\n", psp);
 		rc = -EINVAL;
@@ -677,12 +681,12 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
 		break;
-	case POWER_SUPPLY_PROP_ICL_REDUCTION:
-		rc = smblib_set_icl_reduction(chg, val->intval);
-		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_icl_current(chg, val->intval);
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -838,7 +842,9 @@
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
 	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_QNOVO,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -858,6 +864,7 @@
 {
 	struct smb_charger *chg = power_supply_get_drvdata(psy);
 	int rc = 0;
+	union power_supply_propval pval = {0, };
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
@@ -882,7 +889,14 @@
 		rc = smblib_get_prop_system_temp_level(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
-		rc = smblib_get_prop_charger_temp(chg, val);
+		/* do not query RRADC if charger is not present */
+		rc = smblib_get_prop_usb_present(chg, &pval);
+		if (rc < 0)
+			pr_err("Couldn't get usb present rc=%d\n", rc);
+
+		rc = -ENODATA;
+		if (pval.intval)
+			rc = smblib_get_prop_charger_temp(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
 		rc = smblib_get_prop_charger_temp_max(chg, val);
@@ -902,6 +916,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+		rc = smblib_get_prop_charge_qnovo_enable(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
 		val->intval = chg->qnovo_fv_uv;
 		break;
@@ -977,12 +994,17 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+		rc = smblib_set_prop_charge_qnovo_enable(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
 		chg->qnovo_fv_uv = val->intval;
 		rc = rerun_election(chg->fv_votable);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		chg->qnovo_fcc_ua = val->intval;
+		vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
+			val->intval != -EINVAL && val->intval < 2000000, 0);
 		rc = rerun_election(chg->fcc_votable);
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
@@ -1115,6 +1137,9 @@
 	struct regulator_config cfg = {};
 	int rc = 0;
 
+	if (chg->micro_usb_mode)
+		return 0;
+
 	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
 				      GFP_KERNEL);
 	if (!chg->vconn_vreg)
@@ -1325,6 +1350,39 @@
 {
 	int rc;
 
+	/* Move to typeC mode */
+	/* configure FSM in idle state and disable UFP_ENABLE bit */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT | UFP_EN_CMD_BIT,
+			TYPEC_DISABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for FSM to enter idle state */
+	msleep(200);
+	/* configure TypeC mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			TYPE_C_OR_U_USB_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for mode change before enabling FSM */
+	usleep_range(10000, 11000);
+	/* release FSM from idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't release FSM rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for FSM to start */
+	msleep(100);
+	/* move to uUSB mode */
 	/* configure FSM in idle state */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 			TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
@@ -1333,6 +1391,8 @@
 		return rc;
 	}
 
+	/* wait for FSM to enter idle state */
+	msleep(200);
 	/* configure micro USB mode */
 	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
 			TYPE_C_OR_U_USB_BIT, TYPE_C_OR_U_USB_BIT);
@@ -1341,6 +1401,8 @@
 		return rc;
 	}
 
+	/* wait for mode change before enabling FSM */
+	usleep_range(10000, 11000);
 	/* release FSM from idle state */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 			TYPEC_DISABLE_CMD_BIT, 0);
@@ -1386,7 +1448,8 @@
 
 	/* set OTG current limit */
 	rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
-							chip->dt.otg_cl_ua);
+				(chg->wa_flags & OTG_WA) ?
+				chg->param.otg_cl.min_u : chg->otg_cl_ua);
 	if (rc < 0) {
 		pr_err("Couldn't set otg current limit rc=%d\n", rc);
 		return rc;
@@ -1420,10 +1483,10 @@
 		DEFAULT_VOTER, true, chip->dt.fv_uv);
 	vote(chg->dc_icl_votable,
 		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
-	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
-		chip->dt.hvdcp_disable, 0);
 	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
 			true, 0);
+	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
+		chip->dt.hvdcp_disable, 0);
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
 			true, 0);
 	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
@@ -1489,13 +1552,6 @@
 		return rc;
 	}
 
-	rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
-			QNOVO_PT_ENABLE_CMD_BIT, QNOVO_PT_ENABLE_CMD_BIT);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
-		return rc;
-	}
-
 	/* configure step charging */
 	rc = smb2_config_step_charging(chip);
 	if (rc < 0) {
@@ -1520,6 +1576,16 @@
 		return rc;
 	}
 
+	/* disable h/w autonomous parallel charging control */
+	rc = smblib_masked_write(chg, MISC_CFG_REG,
+				 STAT_PARALLEL_1400MA_EN_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't disable h/w autonomous parallel control rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	/* configure float charger options */
 	switch (chip->dt.float_option) {
 	case 1:
@@ -1608,6 +1674,15 @@
 	return rc;
 }
 
+static int smb2_post_init(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+
+	rerun_election(chg->usb_irq_enable_votable);
+
+	return 0;
+}
+
 static int smb2_chg_config_init(struct smb2 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -1649,7 +1724,7 @@
 		break;
 	case PM660_SUBTYPE:
 		chip->chg.smb_version = PM660_SUBTYPE;
-		chip->chg.wa_flags |= BOOST_BACK_WA;
+		chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA;
 		chg->param.freq_buck = pm660_params.freq_buck;
 		chg->param.freq_boost = pm660_params.freq_boost;
 		chg->chg_freq.freq_5V		= 600;
@@ -2074,7 +2149,7 @@
 	rc = smb2_init_vconn_regulator(chip);
 	if (rc < 0) {
 		pr_err("Couldn't initialize vconn regulator rc=%d\n",
-			rc);
+				rc);
 		goto cleanup;
 	}
 
@@ -2137,6 +2212,8 @@
 		goto cleanup;
 	}
 
+	smb2_post_init(chip);
+
 	smb2_create_debugfs(chip);
 
 	rc = smblib_get_prop_usb_present(chg, &val);
@@ -2167,6 +2244,8 @@
 	}
 	batt_charge_type = val.intval;
 
+	device_init_wakeup(chg->dev, true);
+
 	pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n",
 		usb_present, chg->usb_psy_desc.type,
 		batt_present, batt_health, batt_charge_type);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index eb6727b..c8deedd 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -21,7 +21,7 @@
 #include "smb-lib.h"
 #include "smb-reg.h"
 #include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define smblib_err(chg, fmt, ...)		\
 	pr_err("%s: %s: " fmt, chg->name,	\
@@ -160,39 +160,14 @@
 int smblib_icl_override(struct smb_charger *chg, bool override)
 {
 	int rc;
-	bool override_status;
-	u8 stat;
-	u16 reg;
 
-	switch (chg->smb_version) {
-	case PMI8998_SUBTYPE:
-		reg = APSD_RESULT_STATUS_REG;
-		break;
-	case PM660_SUBTYPE:
-		reg = AICL_STATUS_REG;
-		break;
-	default:
-		smblib_dbg(chg, PR_MISC, "Unknown chip version=%x\n",
-				chg->smb_version);
-		return -EINVAL;
-	}
+	rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+				ICL_OVERRIDE_AFTER_APSD_BIT,
+				override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
 
-	rc = smblib_read(chg, reg, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read reg=%x rc=%d\n", reg, rc);
-		return rc;
-	}
-	override_status = (bool)(stat & ICL_OVERRIDE_LATCH_BIT);
-
-	if (override != override_status) {
-		rc = smblib_masked_write(chg, CMD_APSD_REG,
-				ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
-			return rc;
-		}
-	}
-	return 0;
+	return rc;
 }
 
 /********************
@@ -547,6 +522,26 @@
  * HELPER FUNCTIONS *
  ********************/
 
+static void smblib_rerun_apsd(struct smb_charger *chg)
+{
+	int rc;
+
+	smblib_dbg(chg, PR_MISC, "re-running APSD\n");
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable HVDCP auth IRQ rc=%d\n",
+									rc);
+	}
+
+	rc = smblib_masked_write(chg, CMD_APSD_REG,
+				APSD_RERUN_BIT, APSD_RERUN_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't re-run APSD rc=%d\n", rc);
+}
+
 static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
 {
 	const struct apsd_result *apsd_result;
@@ -564,11 +559,7 @@
 				chg->hvdcp_disable_votable_indirect)) {
 			apsd_result = smblib_get_apsd_result(chg);
 			if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
-				/* rerun APSD */
-				smblib_dbg(chg, PR_MISC, "rerun APSD\n");
-				smblib_masked_write(chg, CMD_APSD_REG,
-						APSD_RERUN_BIT,
-						APSD_RERUN_BIT);
+				smblib_rerun_apsd(chg);
 			}
 		}
 	}
@@ -580,12 +571,13 @@
 	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
 
 	/* if PD is active, APSD is disabled so won't have a valid result */
-	if (chg->pd_active) {
+	if (chg->pd_active)
 		chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
-		return apsd_result;
-	}
+	else
+		chg->usb_psy_desc.type = apsd_result->pst;
 
-	chg->usb_psy_desc.type = apsd_result->pst;
+	smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
+					apsd_result->name, chg->pd_active);
 	return apsd_result;
 }
 
@@ -661,10 +653,13 @@
 {
 	int rc;
 
+	cancel_delayed_work_sync(&chg->pl_enable_work);
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
 
 	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
 
@@ -700,28 +695,6 @@
 	if (rc < 0)
 		smblib_err(chg,
 			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
-
-	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
-	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-	if (rc < 0)
-		smblib_err(chg,
-			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
-			rc);
-}
-
-static bool smblib_sysok_reason_usbin(struct smb_charger *chg)
-{
-	int rc;
-	u8 stat;
-
-	rc = smblib_read(chg, SYSOK_REASON_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get SYSOK_REASON_STATUS rc=%d\n", rc);
-		/* assuming 'not usbin' in case of read failure */
-		return false;
-	}
-
-	return stat & SYSOK_REASON_USBIN_BIT;
 }
 
 void smblib_suspend_on_debug_battery(struct smb_charger *chg)
@@ -747,7 +720,6 @@
 
 int smblib_rerun_apsd_if_required(struct smb_charger *chg)
 {
-	const struct apsd_result *apsd_result;
 	union power_supply_propval val;
 	int rc;
 
@@ -760,21 +732,27 @@
 	if (!val.intval)
 		return 0;
 
-	apsd_result = smblib_get_apsd_result(chg);
-	if ((apsd_result->pst == POWER_SUPPLY_TYPE_UNKNOWN)
-		|| (apsd_result->pst == POWER_SUPPLY_TYPE_USB)) {
-		/* rerun APSD */
-		pr_info("Reruning APSD type = %s at bootup\n",
-				apsd_result->name);
-		rc = smblib_masked_write(chg, CMD_APSD_REG,
-					APSD_RERUN_BIT,
-					APSD_RERUN_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't rerun APSD rc = %d\n", rc);
-			return rc;
+	/* fetch the DPDM regulator */
+	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+						"dpdm-supply", NULL)) {
+		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+		if (IS_ERR(chg->dpdm_reg)) {
+			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
+				PTR_ERR(chg->dpdm_reg));
+			chg->dpdm_reg = NULL;
 		}
 	}
 
+	if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+		smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+		rc = regulator_enable(chg->dpdm_reg);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
+				rc);
+	}
+
+	smblib_rerun_apsd(chg);
+
 	return 0;
 }
 
@@ -812,29 +790,12 @@
 	return 0;
 }
 
-/*********************
- * VOTABLE CALLBACKS *
- *********************/
-
-static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
-			int suspend, const char *client)
-{
-	struct smb_charger *chg = data;
-
-	/* resume input if suspend is invalid */
-	if (suspend < 0)
-		suspend = 0;
-
-	return smblib_set_dc_suspend(chg, (bool)suspend);
-}
-
 #define USBIN_25MA	25000
 #define USBIN_100MA	100000
 #define USBIN_150MA	150000
 #define USBIN_500MA	500000
 #define USBIN_900MA	900000
 
-
 static int set_sdp_current(struct smb_charger *chg, int icl_ua)
 {
 	int rc;
@@ -873,20 +834,18 @@
 	return rc;
 }
 
-static int smblib_usb_icl_vote_callback(struct votable *votable, void *data,
-			int icl_ua, const char *client)
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 {
-	struct smb_charger *chg = data;
 	int rc = 0;
 	bool override;
 	union power_supply_propval pval;
 
 	/* suspend and return if 25mA or less is requested */
-	if (client && (icl_ua < USBIN_25MA))
+	if (icl_ua < USBIN_25MA)
 		return smblib_set_usb_suspend(chg, true);
 
 	disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
-	if (!client)
+	if (icl_ua == INT_MAX)
 		goto override_suspend_config;
 
 	rc = smblib_get_prop_typec_mode(chg, &pval);
@@ -904,8 +863,7 @@
 			goto enable_icl_changed_interrupt;
 		}
 	} else {
-		rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
-				icl_ua - chg->icl_reduction_ua);
+		rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
 			goto enable_icl_changed_interrupt;
@@ -915,7 +873,7 @@
 override_suspend_config:
 	/* determine if override needs to be enforced */
 	override = true;
-	if (client == NULL) {
+	if (icl_ua == INT_MAX) {
 		/* remove override if no voters - hw defaults is desired */
 		override = false;
 	} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
@@ -923,7 +881,7 @@
 			/* For std cable with type = SDP never override */
 			override = false;
 		else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP
-			&& icl_ua - chg->icl_reduction_ua == 1500000)
+			&& icl_ua == 1500000)
 			/*
 			 * For std cable with type = CDP override only if
 			 * current is not 1500mA
@@ -953,6 +911,22 @@
 	return rc;
 }
 
+/*********************
+ * VOTABLE CALLBACKS *
+ *********************/
+
+static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
+			int suspend, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	/* resume input if suspend is invalid */
+	if (suspend < 0)
+		suspend = 0;
+
+	return smblib_set_dc_suspend(chg, (bool)suspend);
+}
+
 static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
 			int icl_ua, const char *client)
 {
@@ -1089,16 +1063,6 @@
 	int rc;
 
 	if (apsd_disable) {
-		/* Don't run APSD on CC debounce when APSD is disabled */
-		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
-							APSD_START_ON_CC_BIT,
-							0);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
-									rc);
-			return rc;
-		}
-
 		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
 							AUTO_SRC_DETECT_BIT,
 							0);
@@ -1114,15 +1078,6 @@
 			smblib_err(chg, "Couldn't enable APSD rc=%d\n", rc);
 			return rc;
 		}
-
-		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
-							APSD_START_ON_CC_BIT,
-							APSD_START_ON_CC_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n",
-									rc);
-			return rc;
-		}
 	}
 
 	return 0;
@@ -1159,6 +1114,26 @@
 	return rc;
 }
 
+static int smblib_usb_irq_enable_vote_callback(struct votable *votable,
+				void *data, int enable, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	if (!chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq ||
+				!chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		return 0;
+
+	if (enable) {
+		enable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+		enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+	} else {
+		disable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+		disable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+	}
+
+	return 0;
+}
+
 /*******************
  * VCONN REGULATOR *
  * *****************/
@@ -1281,11 +1256,14 @@
 /*****************
  * OTG REGULATOR *
  *****************/
-
+#define MAX_RETRY		15
+#define MIN_DELAY_US		2000
+#define MAX_DELAY_US		9000
 static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
 {
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
-	int rc;
+	int rc, retry_count = 0, min_delay = MIN_DELAY_US;
+	u8 stat;
 
 	smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
 	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
@@ -1304,6 +1282,42 @@
 		return rc;
 	}
 
+	if (chg->wa_flags & OTG_WA) {
+		/* check for softstart */
+		do {
+			usleep_range(min_delay, min_delay + 100);
+			rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't read OTG status rc=%d\n",
+					rc);
+				goto out;
+			}
+
+			if (stat & BOOST_SOFTSTART_DONE_BIT) {
+				rc = smblib_set_charge_param(chg,
+					&chg->param.otg_cl, chg->otg_cl_ua);
+				if (rc < 0)
+					smblib_err(chg,
+						"Couldn't set otg limit\n");
+				break;
+			}
+
+			/* increase the delay for following iterations */
+			if (retry_count > 5)
+				min_delay = MAX_DELAY_US;
+		} while (retry_count++ < MAX_RETRY);
+
+		if (retry_count >= MAX_RETRY) {
+			smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
+			goto out;
+		}
+	}
+
+	return 0;
+out:
+	/* disable OTG if softstart failed */
+	smblib_write(chg, CMD_OTG_REG, 0);
 	return rc;
 }
 
@@ -1316,6 +1330,14 @@
 	if (chg->otg_en)
 		goto unlock;
 
+	if (!chg->usb_icl_votable) {
+		chg->usb_icl_votable = find_votable("USB_ICL");
+
+		if (!chg->usb_icl_votable)
+			return -EINVAL;
+	}
+	vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, true, 0);
+
 	rc = _smblib_vbus_regulator_enable(rdev);
 	if (rc >= 0)
 		chg->otg_en = true;
@@ -1337,6 +1359,17 @@
 			smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
 	}
 
+	if (chg->wa_flags & OTG_WA) {
+		/* set OTG current limit to minimum value */
+		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+						chg->param.otg_cl.min_u);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't set otg current limit rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	smblib_dbg(chg, PR_OTG, "disabling OTG\n");
 	rc = smblib_write(chg, CMD_OTG_REG, 0);
 	if (rc < 0) {
@@ -1345,7 +1378,6 @@
 	}
 
 	smblib_dbg(chg, PR_OTG, "start 1 in 8 mode\n");
-	rc = smblib_write(chg, CMD_OTG_REG, 0);
 	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
 				 ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
 	if (rc < 0) {
@@ -1369,6 +1401,8 @@
 	if (rc >= 0)
 		chg->otg_en = false;
 
+	if (chg->usb_icl_votable)
+		vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
 unlock:
 	mutex_unlock(&chg->otg_oc_lock);
 	return rc;
@@ -1682,6 +1716,23 @@
 	return 0;
 }
 
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & QNOVO_PT_ENABLE_CMD_BIT);
+	return 0;
+}
+
 /***********************
  * BATTERY PSY SETTERS *
  ***********************/
@@ -1733,6 +1784,10 @@
 		return -EINVAL;
 
 	chg->system_temp_level = val->intval;
+	/* disable parallel charge in case of system temp level */
+	vote(chg->pl_disable_votable, THERMAL_DAEMON_VOTER,
+			chg->system_temp_level > 0, 0);
+
 	if (chg->system_temp_level == chg->thermal_levels)
 		return vote(chg->chg_disable_votable,
 			THERMAL_DAEMON_VOTER, true, 0);
@@ -1746,6 +1801,22 @@
 	return 0;
 }
 
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
+			QNOVO_PT_ENABLE_CMD_BIT,
+			val->intval ? QNOVO_PT_ENABLE_CMD_BIT : 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
 int smblib_rerun_aicl(struct smb_charger *chg)
 {
 	int rc, settled_icl_ua;
@@ -1951,7 +2022,7 @@
 	int rc = 0;
 	u8 stat;
 
-	if (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0) {
+	if (get_client_vote_locked(chg->usb_icl_votable, USER_VOTER) == 0) {
 		val->intval = false;
 		return rc;
 	}
@@ -2404,6 +2475,22 @@
 		return -EINVAL;
 	}
 
+	if (power_role == UFP_EN_CMD_BIT) {
+		/* disable PBS workaround when forcing sink mode */
+		rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0x0);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+				rc);
+		}
+	} else {
+		/* restore it back to 0xA5 */
+		rc = smblib_write(chg, TM_IO_DTEST4_SEL, 0xA5);
+		if (rc < 0) {
+			smblib_err(chg, "Couldn't write to TM_IO_DTEST4_SEL rc=%d\n",
+				rc);
+		}
+	}
+
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 				 TYPEC_POWER_ROLE_CMD_MASK, power_role);
 	if (rc < 0) {
@@ -2429,10 +2516,6 @@
 		return rc;
 	}
 
-	if (chg->mode == PARALLEL_MASTER)
-		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER,
-		     min_uv > MICRO_5V, 0);
-
 	chg->voltage_min_uv = min_uv;
 	return rc;
 }
@@ -2452,10 +2535,6 @@
 	}
 
 	chg->voltage_max_uv = max_uv;
-	rc = smblib_rerun_aicl(chg);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
-
 	return rc;
 }
 
@@ -2475,6 +2554,7 @@
 
 	vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0);
 	vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0);
+	vote(chg->usb_irq_enable_votable, PD_VOTER, pd_active, 0);
 
 	/*
 	 * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
@@ -2510,6 +2590,9 @@
 			return rc;
 		}
 
+		/* since PD was found the cable must be non-legacy */
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+
 		/* clear USB ICL vote for DCP_VOTER */
 		rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
 		if (rc < 0)
@@ -2517,29 +2600,12 @@
 				"Couldn't un-vote DCP from USB ICL rc=%d\n",
 				rc);
 
-		/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
-		rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-		if (rc < 0)
-			smblib_err(chg,
-					"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
-					rc);
-
 		/* remove USB_PSY_VOTER */
 		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
 			return rc;
 		}
-
-		/* pd active set, parallel charger can be enabled now */
-		rc = vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
-				false, 0);
-		if (rc < 0) {
-			smblib_err(chg,
-				"Couldn't unvote PL_DELAY_HVDCP_VOTER rc=%d\n",
-				rc);
-			return rc;
-		}
 	}
 
 	/* CC pin selection s/w override in PD session; h/w otherwise. */
@@ -2627,12 +2693,6 @@
 
 static struct reg_info cc2_detach_settings[] = {
 	{
-		.reg	= TYPE_C_CFG_REG,
-		.mask	= APSD_START_ON_CC_BIT,
-		.val	= 0,
-		.desc	= "TYPE_C_CFG_REG",
-	},
-	{
 		.reg	= TYPE_C_CFG_2_REG,
 		.mask	= TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
 		.val	= TYPE_C_UFP_MODE_BIT,
@@ -2794,15 +2854,21 @@
 #define TYPEC_DEFAULT_CURRENT_MA	900000
 #define TYPEC_MEDIUM_CURRENT_MA		1500000
 #define TYPEC_HIGH_CURRENT_MA		3000000
-static int smblib_get_charge_current(struct smb_charger *chg,
+int smblib_get_charge_current(struct smb_charger *chg,
 				int *total_current_ua)
 {
 	const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
 	union power_supply_propval val = {0, };
-	int rc, typec_source_rd, current_ua;
+	int rc = 0, typec_source_rd, current_ua;
 	bool non_compliant;
 	u8 stat5;
 
+	if (chg->pd_active) {
+		*total_current_ua =
+			get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+		return rc;
+	}
+
 	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
@@ -2877,39 +2943,12 @@
 	return 0;
 }
 
-int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
-{
-	int current_ua, rc;
-
-	if (reduction_ua == 0) {
-		vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-	} else {
-		/*
-		 * No usb_icl voter means we are defaulting to hw chosen
-		 * max limit. We need a vote from s/w to enforce the reduction.
-		 */
-		if (get_effective_result(chg->usb_icl_votable) == -EINVAL) {
-			rc = smblib_get_charge_current(chg, &current_ua);
-			if (rc < 0) {
-				pr_err("Failed to get ICL rc=%d\n", rc);
-				return rc;
-			}
-			vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, true,
-					current_ua);
-		}
-	}
-
-	chg->icl_reduction_ua = reduction_ua;
-
-	return rerun_election(chg->usb_icl_votable);
-}
-
 /************************
  * PARALLEL PSY GETTERS *
  ************************/
 
 int smblib_get_prop_slave_current_now(struct smb_charger *chg,
-				      union power_supply_propval *pval)
+		union power_supply_propval *pval)
 {
 	if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
 		chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
@@ -2946,6 +2985,14 @@
 		return IRQ_HANDLED;
 	}
 
+	if (chg->wa_flags & OTG_WA) {
+		if (stat & OTG_OC_DIS_SW_STS_RT_STS_BIT)
+			smblib_err(chg, "OTG disabled by hw\n");
+
+		/* not handling software based hiccups for PM660 */
+		return IRQ_HANDLED;
+	}
+
 	if (stat & OTG_OVERCURRENT_RT_STS_BIT)
 		schedule_work(&chg->otg_oc_work);
 
@@ -2964,7 +3011,7 @@
 	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-			rc);
+				rc);
 		return IRQ_HANDLED;
 	}
 
@@ -3069,6 +3116,7 @@
 	return IRQ_HANDLED;
 }
 
+#define PL_DELAY_MS			30000
 irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -3107,6 +3155,11 @@
 				smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
 					rc);
 		}
+
+		/* Schedule work to enable parallel charger */
+		vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
+		schedule_delayed_work(&chg->pl_enable_work,
+					msecs_to_jiffies(PL_DELAY_MS));
 	} else {
 		if (chg->wa_flags & BOOST_BACK_WA)
 			vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
@@ -3161,6 +3214,7 @@
 				|| (stat & AICL_DONE_BIT))
 			delay = 0;
 
+		cancel_delayed_work_sync(&chg->icl_change_work);
 		schedule_delayed_work(&chg->icl_change_work,
 						msecs_to_jiffies(delay));
 	}
@@ -3270,11 +3324,19 @@
 	if (chg->mode == PARALLEL_MASTER)
 		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
 
-	/* QC authentication done, parallel charger can be enabled now */
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
-
 	/* the APSD done handler will set the USB supply type */
 	apsd_result = smblib_get_apsd_result(chg);
+	if (get_effective_result(chg->hvdcp_hw_inov_dis_votable)) {
+		if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+			/* force HVDCP2 to 9V if INOV is disabled */
+			rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+					FORCE_9V_BIT, FORCE_9V_BIT);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't force 9V HVDCP rc=%d\n", rc);
+		}
+	}
+
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
 		   apsd_result->name);
 }
@@ -3292,6 +3354,10 @@
 			/* could be a legacy cable, try doing hvdcp */
 			try_rerun_apsd_for_hvdcp(chg);
 
+		/* enable HDC and ICL irq for QC2/3 charger */
+		if (qc_charger)
+			vote(chg->usb_irq_enable_votable, QC_VOTER, true, 0);
+
 		/*
 		 * HVDCP detection timeout done
 		 * If adapter is not QC2.0/QC3.0 - it is a plain old DCP.
@@ -3300,15 +3366,6 @@
 			/* enforce DCP ICL if specified */
 			vote(chg->usb_icl_votable, DCP_VOTER,
 				chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
-		/*
-		 * If adapter is not QC2.0/QC3.0 remove vote for parallel
-		 * disable.
-		 * Otherwise if adapter is QC2.0/QC3.0 wait for authentication
-		 * to complete.
-		 */
-		if (!qc_charger)
-			vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
-					false, 0);
 	}
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
@@ -3328,6 +3385,37 @@
 		   rising ? "rising" : "falling");
 }
 
+static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
+{
+	switch (pst) {
+	case POWER_SUPPLY_TYPE_USB:
+		/*
+		 * USB_PSY will vote to increase the current to 500/900mA once
+		 * enumeration is done. Ensure that USB_PSY has at least voted
+		 * for 100mA before releasing the LEGACY_UNKNOWN vote.
+		 */
+		if (!is_client_vote_enabled(chg->usb_icl_votable,
+								USB_PSY_VOTER))
+			vote(chg->usb_icl_votable, USB_PSY_VOTER, true, 100000);
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+		break;
+	case POWER_SUPPLY_TYPE_USB_CDP:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+		break;
+	case POWER_SUPPLY_TYPE_USB_DCP:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+		break;
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
+		break;
+	default:
+		smblib_err(chg, "Unknown APSD %d; forcing 500mA\n", pst);
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 500000);
+		break;
+	}
+}
+
 #define HVDCP_DET_MS 2500
 static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
 {
@@ -3337,6 +3425,10 @@
 		return;
 
 	apsd_result = smblib_update_usb_type(chg);
+
+	if (!chg->pd_active)
+		smblib_force_legacy_icl(chg, apsd_result->pst);
+
 	switch (apsd_result->bit) {
 	case SDP_CHARGER_BIT:
 	case CDP_CHARGER_BIT:
@@ -3349,13 +3441,9 @@
 		break;
 	case OCP_CHARGER_BIT:
 	case FLOAT_CHARGER_BIT:
-		/*
-		 * if not DCP then no hvdcp timeout happens. Enable
-		 * pd/parallel here.
-		 */
+		/* if not DCP then no HVDCP timeout happens; enable PD here. */
 		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
 				false, 0);
-		vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
 		break;
 	case DCP_CHARGER_BIT:
 		if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
@@ -3421,6 +3509,9 @@
 {
 	int rc;
 
+	/* reset legacy unknown vote */
+	vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
@@ -3464,16 +3555,19 @@
 		smblib_err(chg,
 			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
 
-	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
-	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-	if (rc < 0)
-		smblib_err(chg,
-			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
-			rc);
 }
 
 static void typec_source_insertion(struct smb_charger *chg)
 {
+	/*
+	 * at any time we want LEGACY_UNKNOWN, PD, or USB_PSY to be voting for
+	 * ICL, so vote LEGACY_UNKNOWN here if none of the above three have
+	 * cast their votes.
+	 */
+	if (!is_client_vote_enabled(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER)
+		&& !is_client_vote_enabled(chg->usb_icl_votable, PD_VOTER)
+		&& !is_client_vote_enabled(chg->usb_icl_votable, USB_PSY_VOTER))
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
 }
 
 static void typec_sink_insertion(struct smb_charger *chg)
@@ -3494,11 +3588,16 @@
 
 static void smblib_handle_typec_removal(struct smb_charger *chg)
 {
+	int rc;
+
+	cancel_delayed_work_sync(&chg->pl_enable_work);
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
 	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
-	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER, true, 0);
-	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+	vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+	vote(chg->usb_irq_enable_votable, QC_VOTER, false, 0);
 
 	/* reset votes from vbus_cc_short */
 	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
@@ -3516,10 +3615,13 @@
 	chg->pulse_cnt = 0;
 	chg->usb_icl_delta_ua = 0;
 
-	chg->usb_ever_removed = true;
+	/* enable APSD CC trigger for next insertion */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+				APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n", rc);
 
 	smblib_update_usb_type(chg);
-
 	typec_source_removal(chg);
 	typec_sink_removal(chg);
 }
@@ -3527,12 +3629,16 @@
 static void smblib_handle_typec_insertion(struct smb_charger *chg,
 		bool sink_attached, bool legacy_cable)
 {
-	int rp;
-	bool vbus_cc_short = false;
-	bool valid_legacy_cable;
+	int rp, rc;
 
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
 
+	/* disable APSD CC trigger since CC is attached */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG, APSD_START_ON_CC_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
+									rc);
+
 	if (sink_attached) {
 		typec_source_removal(chg);
 		typec_sink_insertion(chg);
@@ -3541,25 +3647,16 @@
 		typec_sink_removal(chg);
 	}
 
-	valid_legacy_cable = legacy_cable &&
-		(chg->usb_ever_removed || !smblib_sysok_reason_usbin(chg));
-	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
-			valid_legacy_cable, 0);
-
-	if (valid_legacy_cable) {
-		rp = smblib_get_prop_ufp_mode(chg);
-		if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
-				|| rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
-			vbus_cc_short = true;
-			smblib_err(chg, "Disabling PD and HVDCP, VBUS-CC shorted, rp = %d found\n",
-					rp);
-		}
+	rp = smblib_get_prop_ufp_mode(chg);
+	if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
+			|| rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
+		smblib_dbg(chg, PR_MISC, "VBUS & CC could be shorted; keeping HVDCP disabled\n");
+		vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+								true, 0);
+	} else {
+		vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+								false, 0);
 	}
-
-	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
-			vbus_cc_short, 0);
-	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER,
-			vbus_cc_short, 0);
 }
 
 static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
@@ -3962,6 +4059,9 @@
 	int rc, i;
 	u8 stat;
 
+	if (chg->micro_usb_mode)
+		return;
+
 	smblib_err(chg, "over-current detected on VCONN\n");
 	if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
 		return;
@@ -4056,6 +4156,16 @@
 	smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
 }
 
+static void smblib_pl_enable_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							pl_enable_work.work);
+
+	smblib_dbg(chg, PR_PARALLEL, "timer expired, enabling parallel\n");
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, false, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+}
+
 static int smblib_create_votables(struct smb_charger *chg)
 {
 	int rc = 0;
@@ -4072,13 +4182,19 @@
 		return rc;
 	}
 
+	chg->usb_icl_votable = find_votable("USB_ICL");
+	if (!chg->usb_icl_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+
 	chg->pl_disable_votable = find_votable("PL_DISABLE");
 	if (!chg->pl_disable_votable) {
 		rc = -EPROBE_DEFER;
 		return rc;
 	}
 	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
 
 	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
 					smblib_dc_suspend_vote_callback,
@@ -4088,14 +4204,6 @@
 		return rc;
 	}
 
-	chg->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
-					smblib_usb_icl_vote_callback,
-					chg);
-	if (IS_ERR(chg->usb_icl_votable)) {
-		rc = PTR_ERR(chg->usb_icl_votable);
-		return rc;
-	}
-
 	chg->dc_icl_votable = create_votable("DC_ICL", VOTE_MIN,
 					smblib_dc_icl_vote_callback,
 					chg);
@@ -4181,6 +4289,15 @@
 		return rc;
 	}
 
+	chg->usb_irq_enable_votable = create_votable("USB_IRQ_DISABLE",
+					VOTE_SET_ANY,
+					smblib_usb_irq_enable_vote_callback,
+					chg);
+	if (IS_ERR(chg->usb_irq_enable_votable)) {
+		rc = PTR_ERR(chg->usb_irq_enable_votable);
+		return rc;
+	}
+
 	return rc;
 }
 
@@ -4237,6 +4354,7 @@
 	INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
 	INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
 	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
+	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
 	chg->fake_capacity = -EINVAL;
 
 	switch (chg->mode) {
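_smblib_vbus_regulator_enable() above polls OTG_STATUS_REG for BOOST_SOFTSTART_DONE_BIT, widening the delay after the first few tries and giving up after MAX_RETRY iterations. A condensed sketch of that retry-with-backoff loop, with read_status() and sleep_us() as hypothetical stand-ins for smblib_read() and usleep_range(), and a placeholder bit position:

	#define MAX_RETRY	15
	#define MIN_DELAY_US	2000
	#define MAX_DELAY_US	9000
	#define SOFTSTART_DONE	(1 << 1)	/* placeholder bit */

	extern int read_status(unsigned char *stat);		/* hypothetical */
	extern void sleep_us(unsigned int lo, unsigned int hi);	/* hypothetical */

	static int wait_for_softstart(void)
	{
		int rc, retry_count = 0, min_delay = MIN_DELAY_US;
		unsigned char stat;

		do {
			sleep_us(min_delay, min_delay + 100);
			rc = read_status(&stat);
			if (rc < 0)
				return rc;
			if (stat & SOFTSTART_DONE)
				return 0;
			/* widen the polling interval after the early tries */
			if (retry_count > 5)
				min_delay = MAX_DELAY_US;
		} while (retry_count++ < MAX_RETRY);

		return -1;	/* softstart never completed; caller disables OTG */
	}
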
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 21ccd3c..49b9d3d 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -32,10 +32,12 @@
 #define USER_VOTER			"USER_VOTER"
 #define PD_VOTER			"PD_VOTER"
 #define DCP_VOTER			"DCP_VOTER"
+#define QC_VOTER			"QC_VOTER"
 #define PL_USBIN_USBIN_VOTER		"PL_USBIN_USBIN_VOTER"
 #define USB_PSY_VOTER			"USB_PSY_VOTER"
 #define PL_TAPER_WORK_RUNNING_VOTER	"PL_TAPER_WORK_RUNNING_VOTER"
 #define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
+#define PL_QNOVO_VOTER			"PL_QNOVO_VOTER"
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define USBIN_V_VOTER			"USBIN_V_VOTER"
 #define CHG_STATE_VOTER			"CHG_STATE_VOTER"
@@ -47,17 +49,18 @@
 #define PD_DISALLOWED_INDIRECT_VOTER	"PD_DISALLOWED_INDIRECT_VOTER"
 #define PD_HARD_RESET_VOTER		"PD_HARD_RESET_VOTER"
 #define VBUS_CC_SHORT_VOTER		"VBUS_CC_SHORT_VOTER"
-#define LEGACY_CABLE_VOTER		"LEGACY_CABLE_VOTER"
 #define PD_INACTIVE_VOTER		"PD_INACTIVE_VOTER"
 #define BOOST_BACK_VOTER		"BOOST_BACK_VOTER"
+#define USBIN_USBIN_BOOST_VOTER		"USBIN_USBIN_BOOST_VOTER"
 #define HVDCP_INDIRECT_VOTER		"HVDCP_INDIRECT_VOTER"
 #define MICRO_USB_VOTER			"MICRO_USB_VOTER"
 #define DEBUG_BOARD_VOTER		"DEBUG_BOARD_VOTER"
 #define PD_SUSPEND_SUPPORTED_VOTER	"PD_SUSPEND_SUPPORTED_VOTER"
-#define PL_DELAY_HVDCP_VOTER		"PL_DELAY_HVDCP_VOTER"
+#define PL_DELAY_VOTER			"PL_DELAY_VOTER"
 #define CTM_VOTER			"CTM_VOTER"
 #define SW_QC3_VOTER			"SW_QC3_VOTER"
 #define AICL_RERUN_VOTER		"AICL_RERUN_VOTER"
+#define LEGACY_UNKNOWN_VOTER		"LEGACY_UNKNOWN_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
 #define OTG_MAX_ATTEMPTS	3
@@ -80,6 +83,7 @@
 	BOOST_BACK_WA			= BIT(1),
 	TYPEC_CC2_REMOVAL_WA_BIT	= BIT(2),
 	QC_AUTH_INTERRUPT_WA_BIT	= BIT(3),
+	OTG_WA				= BIT(4),
 };
 
 enum smb_irq_index {
@@ -271,6 +275,7 @@
 	struct votable		*hvdcp_enable_votable;
 	struct votable		*apsd_disable_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*usb_irq_enable_votable;
 
 	/* work */
 	struct work_struct	bms_update_work;
@@ -283,6 +288,7 @@
 	struct work_struct	vconn_oc_work;
 	struct delayed_work	otg_ss_done_work;
 	struct delayed_work	icl_change_work;
+	struct delayed_work	pl_enable_work;
 
 	/* cached status */
 	int			voltage_min_uv;
@@ -305,17 +311,16 @@
 	int			otg_attempts;
 	int			vconn_attempts;
 	int			default_icl_ua;
+	int			otg_cl_ua;
 
 	/* workaround flag */
 	u32			wa_flags;
 	enum cc2_sink_type	cc2_sink_detach_flag;
 	int			boost_current_ua;
+	int			temp_speed_reading_count;
 
 	/* extcon for VBUS / ID notification to USB for uUSB */
 	struct extcon_dev	*extcon;
-	bool			usb_ever_removed;
-
-	int			icl_reduction_ua;
 
 	/* qnovo */
 	int			qnovo_fcc_ua;
@@ -453,6 +458,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_die_health(struct smb_charger *chg,
 			       union power_supply_propval *val);
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+			       union power_supply_propval *val);
 int smblib_set_prop_pd_current_max(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_usb_current_max(struct smb_charger *chg,
@@ -473,14 +480,17 @@
 				union power_supply_propval *val);
 int smblib_set_prop_ship_mode(struct smb_charger *chg,
 				const union power_supply_propval *val);
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+				const union power_supply_propval *val);
 void smblib_suspend_on_debug_battery(struct smb_charger *chg);
 int smblib_rerun_apsd_if_required(struct smb_charger *chg);
 int smblib_get_prop_fcc_delta(struct smb_charger *chg,
 			       union power_supply_propval *val);
 int smblib_icl_override(struct smb_charger *chg, bool override);
-int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua);
 int smblib_dp_dm(struct smb_charger *chg, int val);
 int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
+int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index 54b6b38..167666a 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -628,6 +628,7 @@
 
 #define USBIN_LOAD_CFG_REG			(USBIN_BASE + 0x65)
 #define USBIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+#define ICL_OVERRIDE_AFTER_APSD_BIT		BIT(4)
 
 #define USBIN_ICL_OPTIONS_REG			(USBIN_BASE + 0x66)
 #define CFG_USB3P0_SEL_BIT			BIT(2)
@@ -918,6 +919,7 @@
 
 #define MISC_CFG_REG				(MISC_BASE + 0x52)
 #define GSM_PA_ON_ADJ_SEL_BIT			BIT(0)
+#define STAT_PARALLEL_1400MA_EN_CFG_BIT		BIT(3)
 #define TCC_DEBOUNCE_20MS_BIT			BIT(5)
 
 #define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
@@ -1018,6 +1020,8 @@
 #define CFG_BUCKBOOST_FREQ_SELECT_BUCK_REG	(MISC_BASE + 0xA0)
 #define CFG_BUCKBOOST_FREQ_SELECT_BOOST_REG	(MISC_BASE + 0xA1)
 
+#define TM_IO_DTEST4_SEL			(MISC_BASE + 0xE9)
+
 /* CHGR FREQ Peripheral registers */
 #define FREQ_CLK_DIV_REG			(CHGR_FREQ_BASE + 0x50)
 
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 1e89a721..83374bb 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -28,7 +28,7 @@
 #include "smb-reg.h"
 #include "smb-lib.h"
 #include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define SMB138X_DEFAULT_FCC_UA 1000000
 #define SMB138X_DEFAULT_ICL_UA 1500000
@@ -45,6 +45,7 @@
 #define STACKED_DIODE_EN_BIT		BIT(2)
 
 #define TDIE_AVG_COUNT	10
+#define MAX_SPEED_READING_TIMES		5
 
 enum {
 	OOB_COMP_WA_BIT = BIT(0),
@@ -95,6 +96,7 @@
 	int	dc_icl_ua;
 	int	chg_temp_max_mdegc;
 	int	connector_temp_max_mdegc;
+	int	pl_mode;
 };
 
 struct smb138x {
@@ -126,8 +128,16 @@
 	union power_supply_propval pval;
 	int rc = 0, avg = 0, i;
 	struct smb_charger *chg = &chip->chg;
+	int die_avg_count;
 
-	for (i = 0; i < TDIE_AVG_COUNT; i++) {
+	if (chg->temp_speed_reading_count < MAX_SPEED_READING_TIMES) {
+		chg->temp_speed_reading_count++;
+		die_avg_count = 1;
+	} else {
+		die_avg_count = TDIE_AVG_COUNT;
+	}
+
+	for (i = 0; i < die_avg_count; i++) {
 		pval.intval = 0;
 		rc = smblib_get_prop_charger_temp(chg, &pval);
 		if (rc < 0) {
@@ -137,7 +147,7 @@
 		}
 		avg += pval.intval;
 	}
-	val->intval = avg / TDIE_AVG_COUNT;
+	val->intval = avg / die_avg_count;
 	return rc;
 }
 
@@ -152,6 +162,11 @@
 		return -EINVAL;
 	}
 
+	rc = of_property_read_u32(node,
+				"qcom,parallel-mode", &chip->dt.pl_mode);
+	if (rc < 0)
+		chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
+
 	chip->dt.suspend_input = of_property_read_bool(node,
 				"qcom,suspend-input");
 
@@ -520,6 +535,8 @@
 	POWER_SUPPLY_PROP_CHARGING_ENABLED,
 	POWER_SUPPLY_PROP_PIN_ENABLED,
 	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CURRENT_NOW,
@@ -559,6 +576,21 @@
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smblib_get_usb_suspend(chg, &val->intval);
 		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_get_prop_input_current_limited(chg, val);
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&val->intval);
+		else
+			val->intval = 0;
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
 		break;
@@ -579,7 +611,7 @@
 		val->strval = "smb138x";
 		break;
 	case POWER_SUPPLY_PROP_PARALLEL_MODE:
-		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+		val->intval = chip->dt.pl_mode;
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
 		val->intval = smb138x_get_prop_connector_health(chip);
@@ -638,6 +670,12 @@
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
+				val->intval);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
 		break;
@@ -1449,6 +1487,16 @@
 		goto cleanup;
 	}
 
+	if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+		rc = smb138x_init_vbus_regulator(chip);
+		if (rc < 0) {
+			pr_err("Couldn't initialize vbus regulator rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	rc = smb138x_init_parallel_psy(chip);
 	if (rc < 0) {
 		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
@@ -1473,6 +1521,8 @@
 	smblib_deinit(chg);
 	if (chip->parallel_psy)
 		power_supply_unregister(chip->parallel_psy);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		regulator_unregister(chg->vbus_vreg->rdev);
 	return rc;
 }
 
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index 7bb2068..d314579 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -113,7 +113,7 @@
 #define ALUA_POLICY_SWITCH_ALL		1
 
 static void alua_rtpg_work(struct work_struct *work);
-static void alua_rtpg_queue(struct alua_port_group *pg,
+static bool alua_rtpg_queue(struct alua_port_group *pg,
 			    struct scsi_device *sdev,
 			    struct alua_queue_data *qdata, bool force);
 static void alua_check(struct scsi_device *sdev, bool force);
@@ -862,7 +862,13 @@
 	kref_put(&pg->kref, release_port_group);
 }
 
-static void alua_rtpg_queue(struct alua_port_group *pg,
+/**
+ * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
+ *
+ * Returns true if and only if alua_rtpg_work() will be called asynchronously.
+ * That function is responsible for calling @qdata->fn().
+ */
+static bool alua_rtpg_queue(struct alua_port_group *pg,
 			    struct scsi_device *sdev,
 			    struct alua_queue_data *qdata, bool force)
 {
@@ -870,8 +876,8 @@
 	unsigned long flags;
 	struct workqueue_struct *alua_wq = kaluad_wq;
 
-	if (!pg)
-		return;
+	if (!pg || scsi_device_get(sdev))
+		return false;
 
 	spin_lock_irqsave(&pg->lock, flags);
 	if (qdata) {
@@ -884,14 +890,12 @@
 		pg->flags |= ALUA_PG_RUN_RTPG;
 		kref_get(&pg->kref);
 		pg->rtpg_sdev = sdev;
-		scsi_device_get(sdev);
 		start_queue = 1;
 	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
 		pg->flags |= ALUA_PG_RUN_RTPG;
 		/* Do not queue if the worker is already running */
 		if (!(pg->flags & ALUA_PG_RUNNING)) {
 			kref_get(&pg->kref);
-			sdev = NULL;
 			start_queue = 1;
 		}
 	}
@@ -900,13 +904,17 @@
 		alua_wq = kaluad_sync_wq;
 	spin_unlock_irqrestore(&pg->lock, flags);
 
-	if (start_queue &&
-	    !queue_delayed_work(alua_wq, &pg->rtpg_work,
-				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS))) {
-		if (sdev)
-			scsi_device_put(sdev);
-		kref_put(&pg->kref, release_port_group);
+	if (start_queue) {
+		if (queue_delayed_work(alua_wq, &pg->rtpg_work,
+				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
+			sdev = NULL;
+		else
+			kref_put(&pg->kref, release_port_group);
 	}
+	if (sdev)
+		scsi_device_put(sdev);
+
+	return true;
 }
 
 /*
@@ -1007,11 +1015,13 @@
 		mutex_unlock(&h->init_mutex);
 		goto out;
 	}
-	fn = NULL;
 	rcu_read_unlock();
 	mutex_unlock(&h->init_mutex);
 
-	alua_rtpg_queue(pg, sdev, qdata, true);
+	if (alua_rtpg_queue(pg, sdev, qdata, true))
+		fn = NULL;
+	else
+		err = SCSI_DH_DEV_OFFLINED;
 	kref_put(&pg->kref, release_port_group);
 out:
 	if (fn)
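With alua_rtpg_queue() returning bool, the activate path hands ownership of the completion callback to the worker only when queuing succeeds; otherwise it completes synchronously with SCSI_DH_DEV_OFFLINED. A reduced sketch of the caller-side contract (simplified types; the integer constants merely stand in for the SCSI_DH_* codes):

	typedef void (activate_complete)(void *data, int err);

	/* returns true iff the worker was queued and will call the callback */
	extern bool rtpg_queue(void *pg, activate_complete *fn, void *data);

	static int activate(void *pg, activate_complete *fn, void *data)
	{
		int err = 0;			/* stands in for SCSI_DH_OK */

		if (rtpg_queue(pg, fn, data))
			fn = NULL;		/* worker owns the callback now */
		else
			err = 1;		/* stands in for SCSI_DH_DEV_OFFLINED */

		if (fn)
			fn(data, err);		/* complete synchronously */
		return err;
	}
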
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 763f012..87f5e694 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -221,7 +221,7 @@
 		task->num_scatter = qc->n_elem;
 	} else {
 		for_each_sg(qc->sg, sg, qc->n_elem, si)
-			xfer += sg->length;
+			xfer += sg_dma_len(sg);
 
 		task->total_xfer_len = xfer;
 		task->num_scatter = si;
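The sas_ata fix matters because dma_map_sg() may merge or resize entries, so the length the HBA actually transfers is the DMA-mapped length, not the original sg->length. A small helper showing the corrected accumulation (kernel C, assuming an already-mapped scatterlist):

	#include <linux/scatterlist.h>

	static unsigned int total_dma_len(struct scatterlist *sgl,
					  unsigned int nents)
	{
		struct scatterlist *sg;
		unsigned int si, xfer = 0;

		/* sum DMA-mapped lengths; mapping may have merged entries */
		for_each_sg(sgl, sg, nents, si)
			xfer += sg_dma_len(sg);

		return xfer;
	}
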
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index fe7469c..ad33238 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -2153,8 +2153,6 @@
 		    "Timer for the VP[%d] has stopped\n", vha->vp_idx);
 	}
 
-	BUG_ON(atomic_read(&vha->vref_count));
-
 	qla2x00_free_fcports(vha);
 
 	mutex_lock(&ha->vport_lock);
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 73b12e4..8e63a7b 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -3742,6 +3742,7 @@
 	struct qla8044_reset_template reset_tmplt;
 	struct qla_tgt_counters tgt_counters;
 	uint16_t	bbcr;
+	wait_queue_head_t vref_waitq;
 } scsi_qla_host_t;
 
 struct qla27xx_image_status {
@@ -3780,6 +3781,7 @@
 	mb();						     \
 	if (__vha->flags.delete_progress) {		     \
 		atomic_dec(&__vha->vref_count);		     \
+		wake_up(&__vha->vref_waitq);		\
 		__bail = 1;				     \
 	} else {					     \
 		__bail = 0;				     \
@@ -3788,6 +3790,7 @@
 
 #define QLA_VHA_MARK_NOT_BUSY(__vha) do {		     \
 	atomic_dec(&__vha->vref_count);			     \
+	wake_up(&__vha->vref_waitq);			\
 } while (0)
 
 /*
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 5b09296..8f12f6b 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -4356,6 +4356,7 @@
 			}
 		}
 		atomic_dec(&vha->vref_count);
+		wake_up(&vha->vref_waitq);
 	}
 	spin_unlock_irqrestore(&ha->vport_slock, flags);
 }
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cf7ba52..3dfb54a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -74,13 +74,14 @@
 	 * ensures no active vp_list traversal while the vport is removed
 	 * from the queue)
 	 */
+	wait_event_timeout(vha->vref_waitq, !atomic_read(&vha->vref_count),
+	    10*HZ);
+
 	spin_lock_irqsave(&ha->vport_slock, flags);
-	while (atomic_read(&vha->vref_count)) {
-		spin_unlock_irqrestore(&ha->vport_slock, flags);
-
-		msleep(500);
-
-		spin_lock_irqsave(&ha->vport_slock, flags);
+	if (atomic_read(&vha->vref_count)) {
+		ql_dbg(ql_dbg_vport, vha, 0xfffa,
+		    "vha->vref_count=%u timeout\n", vha->vref_count.counter);
+		vha->vref_count = (atomic_t)ATOMIC_INIT(0);
 	}
 	list_del(&vha->list);
 	qlt_update_vp_map(vha, RESET_VP_IDX);
@@ -269,6 +270,7 @@
 
 			spin_lock_irqsave(&ha->vport_slock, flags);
 			atomic_dec(&vha->vref_count);
+			wake_up(&vha->vref_waitq);
 		}
 		i++;
 	}
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index bea819e..4f361d8 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -4045,6 +4045,7 @@
 
 	spin_lock_init(&vha->work_lock);
 	spin_lock_init(&vha->cmd_list_lock);
+	init_waitqueue_head(&vha->vref_waitq);
 
 	sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
 	ql_dbg(ql_dbg_init, vha, 0x0041,
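The qla2xxx changes replace a 500 ms polling loop with a wait queue that is woken on every vref_count decrement, so vport teardown blocks only while references are actually held (bounded at 10 s). The underlying pattern, sketched standalone in kernel C:

	#include <linux/atomic.h>
	#include <linux/jiffies.h>
	#include <linux/wait.h>

	static atomic_t ref_count = ATOMIC_INIT(0);
	static DECLARE_WAIT_QUEUE_HEAD(ref_waitq);

	static void put_ref(void)
	{
		atomic_dec(&ref_count);
		wake_up(&ref_waitq);	/* let the teardown path re-check */
	}

	static void drain_refs(void)
	{
		/* wait, bounded, for all outstanding references to drop */
		wait_event_timeout(ref_waitq, !atomic_read(&ref_count), 10 * HZ);
	}
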
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index c05cf3b..44c466b 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1002,6 +1002,8 @@
 		result = get_user(val, ip);
 		if (result)
 			return result;
+		if (val > SG_MAX_CDB_SIZE)
+			return -ENOMEM;
 		sfp->next_cmd_len = (val > 0) ? val : 0;
 		return 0;
 	case SG_GET_VERSION_NUM:
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index d326b80..ee23fc7 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -32,6 +32,7 @@
 #include "ufs_quirks.h"
 #include "ufs-qcom-ice.h"
 #include "ufs-qcom-debugfs.h"
+#include <linux/clk/qcom.h>
 
 #define MAX_PROP_SIZE		   32
 #define VDDP_REF_CLK_MIN_UV        1200000
@@ -356,6 +357,28 @@
 	return err;
 }
 
+static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
+{
+	struct ufs_clk_info *clki;
+
+	/*
+	 * Configure the core and peripheral memory state behavior of
+	 * the UFS clocks when they are turned off.
+	 * This configuration is required to allow retaining
+	 * ICE crypto configuration (including keys) when
+	 * core_clk_ice is turned off, and powering down
+	 * non-ICE RAMs of host controller.
+	 */
+	list_for_each_entry(clki, &hba->clk_list_head, list) {
+		if (!strcmp(clki->name, "core_clk_ice"))
+			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
+		clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
+		clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
+	}
+}
+
 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
 				      enum ufs_notify_change_status status)
 {
@@ -364,6 +387,7 @@
 
 	switch (status) {
 	case PRE_CHANGE:
+		ufs_qcom_force_mem_config(hba);
 		ufs_qcom_power_up_sequence(hba);
 		/*
 		 * The PHY PLL output is the source of tx/rx lane symbol
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 712de81..7b91717 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -4308,15 +4308,25 @@
 	 * mode hence full reinit is required to move link to HS speeds.
 	 */
 	if (ret || hba->full_init_linereset) {
+		int err;
+
 		hba->full_init_linereset = false;
 		ufshcd_update_error_stats(hba, UFS_ERR_HIBERN8_ENTER);
 		dev_err(hba->dev, "%s: hibern8 enter failed. ret = %d",
 			__func__, ret);
 		/*
-		 * If link recovery fails then return error so that caller
-		 * don't retry the hibern8 enter again.
+		 * If link recovery fails then return error code (-ENOLINK)
+		 * returned by ufshcd_link_recovery().
+		 * If link recovery succeeds then return -EAGAIN so that the
+		 * hibern8 enter can be retried.
 		 */
-		ret = ufshcd_link_recovery(hba);
+		err = ufshcd_link_recovery(hba);
+		if (err) {
+			dev_err(hba->dev, "%s: link recovery failed\n",
+				__func__);
+			ret = err;
+		} else {
+			ret = -EAGAIN;
+		}
 	} else {
 		dev_dbg(hba->dev, "%s: Hibern8 Enter at %lld us", __func__,
 			ktime_to_us(ktime_get()));
@@ -4333,8 +4343,8 @@
 		ret = __ufshcd_uic_hibern8_enter(hba);
 		if (!ret)
 			goto out;
-		/* Unable to recover the link, so no point proceeding */
-		 if (ret == -ENOLINK)
+		else if (ret != -EAGAIN)
+			/* Unable to recover the link, so no point proceeding */
 			BUG();
 	}
 out:
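The ufshcd change turns the hibern8 enter result into a tri-state: success, -EAGAIN (link recovered, worth retrying), or a hard recovery error. A hedged sketch of the resulting retry wrapper, with try_enter() as a hypothetical single attempt:

	#include <errno.h>

	extern int try_enter(void);	/* hypothetical: one hibern8 attempt */

	static int hibern8_enter_with_retries(int retries)
	{
		int ret = -EAGAIN;

		while (retries--) {
			ret = try_enter();
			if (!ret)
				return 0;	/* entered hibern8 */
			if (ret != -EAGAIN)
				break;		/* link unrecoverable; give up */
			/* -EAGAIN: link recovered, attempt hibern8 again */
		}
		return ret;
	}
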
diff --git a/drivers/soc/qcom/eud.c b/drivers/soc/qcom/eud.c
index 11965a2..1455069 100644
--- a/drivers/soc/qcom/eud.c
+++ b/drivers/soc/qcom/eud.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -144,10 +144,10 @@
 					eud_work);
 
 	if (chip->int_status == EUD_INT_VBUS)
-		extcon_set_cable_state_(chip->extcon, chip->extcon_id,
+		extcon_set_state_sync(chip->extcon, chip->extcon_id,
 					chip->usb_attach);
 	else if (chip->int_status == EUD_INT_CHGR)
-		extcon_set_cable_state_(chip->extcon, chip->extcon_id,
+		extcon_set_state_sync(chip->extcon, chip->extcon_id,
 					chip->chgr_enable);
 }
 
diff --git a/drivers/soc/qcom/glink_loopback_server.c b/drivers/soc/qcom/glink_loopback_server.c
index 0aeb0e8..3b540f3 100644
--- a/drivers/soc/qcom/glink_loopback_server.c
+++ b/drivers/soc/qcom/glink_loopback_server.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,7 @@
 	{"LOOPBACK_CTL_APSS", "lpass", "smem"},
 	{"LOOPBACK_CTL_APSS", "dsps", "smem"},
 	{"LOOPBACK_CTL_APSS", "spss", "mailbox"},
+	{"LOOPBACK_CTL_APSS", "wdsp", "spi"},
 };
 
 static DEFINE_MUTEX(ctl_ch_list_lock);
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 2cbbe2e..bbde4b6 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -266,8 +266,8 @@
 		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
 						    "rmb_base");
 		q6->rmb_base = devm_ioremap_resource(&pdev->dev, res);
-		if (!q6->rmb_base)
-			return -ENOMEM;
+		if (IS_ERR(q6->rmb_base))
+			return PTR_ERR(q6->rmb_base);
 		drv->rmb_base = q6->rmb_base;
 		q6_desc->ops = &pil_msa_mss_ops_selfauth;
 	}
diff --git a/drivers/soc/qcom/pil-q6v5.c b/drivers/soc/qcom/pil-q6v5.c
index d9d6c72..15e0309 100644
--- a/drivers/soc/qcom/pil-q6v5.c
+++ b/drivers/soc/qcom/pil-q6v5.c
@@ -628,8 +628,8 @@
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qdsp6_base");
 	drv->reg_base = devm_ioremap_resource(&pdev->dev, res);
-	if (!drv->reg_base)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(drv->reg_base))
+		return drv->reg_base;
 
 	desc = &drv->desc;
 	ret = of_property_read_string(pdev->dev.of_node, "qcom,firmware-name",
diff --git a/drivers/spmi/Kconfig b/drivers/spmi/Kconfig
index 0d3b70b..633632a 100644
--- a/drivers/spmi/Kconfig
+++ b/drivers/spmi/Kconfig
@@ -24,4 +24,14 @@
 	  This is required for communicating with Qualcomm PMICs and
 	  other devices that have the SPMI interface.
 
+config SPMI_MSM_PMIC_ARB_DEBUG
+	tristate "QTI SPMI Debug Controller (PMIC Arbiter)"
+	depends on ARCH_QCOM || COMPILE_TEST
+	depends on HAS_IOMEM
+	help
+	  If you say yes to this option, support will be included for the
+	  built-in SPMI PMIC Arbiter debug interface on Qualcomm Technologies,
+	  Inc. (QTI) MSM family processors.  This feature is available on chips
+	  with PMIC arbiter version 5 and above.
+
 endif
diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile
index fc75104..4f20815 100644
--- a/drivers/spmi/Makefile
+++ b/drivers/spmi/Makefile
@@ -4,3 +4,4 @@
 obj-$(CONFIG_SPMI)	+= spmi.o
 
 obj-$(CONFIG_SPMI_MSM_PMIC_ARB)	+= spmi-pmic-arb.o
+obj-$(CONFIG_SPMI_MSM_PMIC_ARB_DEBUG)	+= spmi-pmic-arb-debug.o
diff --git a/drivers/spmi/spmi-pmic-arb-debug.c b/drivers/spmi/spmi-pmic-arb-debug.c
new file mode 100644
index 0000000..c5a31a9
--- /dev/null
+++ b/drivers/spmi/spmi-pmic-arb-debug.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+
+/* PMIC Arbiter debug register offsets */
+#define PMIC_ARB_DEBUG_CMD0		0x00
+#define PMIC_ARB_DEBUG_CMD1		0x04
+#define PMIC_ARB_DEBUG_CMD2		0x08
+#define PMIC_ARB_DEBUG_CMD3		0x0C
+#define PMIC_ARB_DEBUG_STATUS		0x14
+#define PMIC_ARB_DEBUG_WDATA(n)		(0x18 + 4 * (n))
+#define PMIC_ARB_DEBUG_RDATA(n)		(0x38 + 4 * (n))
+
+/* Transaction status flag bits */
+enum pmic_arb_chnl_status {
+	PMIC_ARB_STATUS_DONE		= BIT(0),
+	PMIC_ARB_STATUS_FAILURE		= BIT(1),
+	PMIC_ARB_STATUS_DENIED		= BIT(2),
+	PMIC_ARB_STATUS_DROPPED		= BIT(3),
+};
+
+/* Command Opcodes */
+enum pmic_arb_cmd_op_code {
+	PMIC_ARB_OP_EXT_WRITEL		= 0,
+	PMIC_ARB_OP_EXT_READL		= 1,
+	PMIC_ARB_OP_EXT_WRITE		= 2,
+	PMIC_ARB_OP_RESET		= 3,
+	PMIC_ARB_OP_SLEEP		= 4,
+	PMIC_ARB_OP_SHUTDOWN		= 5,
+	PMIC_ARB_OP_WAKEUP		= 6,
+	PMIC_ARB_OP_AUTHENTICATE	= 7,
+	PMIC_ARB_OP_MSTR_READ		= 8,
+	PMIC_ARB_OP_MSTR_WRITE		= 9,
+	PMIC_ARB_OP_EXT_READ		= 13,
+	PMIC_ARB_OP_WRITE		= 14,
+	PMIC_ARB_OP_READ		= 15,
+	PMIC_ARB_OP_ZERO_WRITE		= 16,
+};
+
+#define PMIC_ARB_TIMEOUT_US		100
+#define PMIC_ARB_MAX_TRANS_BYTES	8
+#define PMIC_ARB_MAX_SID		0xF
+
+/**
+ * spmi_pmic_arb_debug - SPMI PMIC Arbiter debug object
+ *
+ * @addr:		base address of SPMI PMIC arbiter debug module
+ * @lock:		lock to synchronize accesses.
+ */
+struct spmi_pmic_arb_debug {
+	void __iomem		*addr;
+	raw_spinlock_t		lock;
+};
+
+static inline void pmic_arb_debug_write(struct spmi_pmic_arb_debug *pa,
+				u32 offset, u32 val)
+{
+	writel_relaxed(val, pa->addr + offset);
+}
+
+static inline u32 pmic_arb_debug_read(struct spmi_pmic_arb_debug *pa,
+				u32 offset)
+{
+	return readl_relaxed(pa->addr + offset);
+}
+
+/* pa->lock must be held by the caller. */
+static int pmic_arb_debug_wait_for_done(struct spmi_controller *ctrl)
+{
+	struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+	u32 status = 0;
+	u32 timeout = PMIC_ARB_TIMEOUT_US;
+
+	while (timeout--) {
+		status = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_STATUS);
+
+		if (status & PMIC_ARB_STATUS_DONE) {
+			if (status & PMIC_ARB_STATUS_DENIED) {
+				dev_err(&ctrl->dev, "%s: transaction denied (0x%x)\n",
+					__func__, status);
+				return -EPERM;
+			}
+
+			if (status & PMIC_ARB_STATUS_FAILURE) {
+				dev_err(&ctrl->dev, "%s: transaction failed (0x%x)\n",
+					__func__, status);
+				return -EIO;
+			}
+
+			if (status & PMIC_ARB_STATUS_DROPPED) {
+				dev_err(&ctrl->dev, "%s: transaction dropped (0x%x)\n",
+					__func__, status);
+				return -EIO;
+			}
+
+			return 0;
+		}
+		udelay(1);
+	}
+
+	dev_err(&ctrl->dev, "%s: timeout, status 0x%x\n", __func__, status);
+	return -ETIMEDOUT;
+}
+
+/* pa->lock must be held by the caller. */
+static int pmic_arb_debug_issue_command(struct spmi_controller *ctrl, u8 opc,
+				u8 sid, u16 addr, size_t len)
+{
+	struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+	u16 pid       = (addr >> 8) & 0xFF;
+	u16 offset    = addr & 0xFF;
+	u8 byte_count = len - 1;
+
+	if (byte_count >= PMIC_ARB_MAX_TRANS_BYTES) {
+		dev_err(&ctrl->dev, "pmic-arb supports 1 to %d bytes per transaction, but %zu requested",
+			PMIC_ARB_MAX_TRANS_BYTES, len);
+		return  -EINVAL;
+	}
+
+	if (sid > PMIC_ARB_MAX_SID) {
+		dev_err(&ctrl->dev, "pmic-arb supports sid 0 to %u, but %u requested",
+			PMIC_ARB_MAX_SID, sid);
+		return  -EINVAL;
+	}
+
+	pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD3, offset);
+	pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD2, pid);
+	pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD1, (byte_count << 4) | sid);
+
+	/* Start the transaction */
+	pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_CMD0, opc << 1);
+
+	return pmic_arb_debug_wait_for_done(ctrl);
+}
+
+/* Non-data command */
+static int pmic_arb_debug_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid)
+{
+	dev_dbg(&ctrl->dev, "cmd op:0x%x sid:%d\n", opc, sid);
+
+	/* Check for valid non-data command */
+	if (opc < SPMI_CMD_RESET || opc > SPMI_CMD_WAKEUP)
+		return -EINVAL;
+
+	/* The debug bus controller does not support non-data commands */
+	return -EOPNOTSUPP;
+}
+
+static int pmic_arb_debug_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid,
+				u16 addr, u8 *buf, size_t len)
+{
+	struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+	unsigned long flags;
+	int i, rc;
+
+	/* Check the opcode */
+	if (opc >= 0x60 && opc <= 0x7F)
+		opc = PMIC_ARB_OP_READ;
+	else if (opc >= 0x20 && opc <= 0x2F)
+		opc = PMIC_ARB_OP_EXT_READ;
+	else if (opc >= 0x38 && opc <= 0x3F)
+		opc = PMIC_ARB_OP_EXT_READL;
+	else
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&pa->lock, flags);
+
+	rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
+	if (rc)
+		goto done;
+
+	/* Read data from FIFO */
+	for (i = 0; i < len; i++)
+		buf[i] = pmic_arb_debug_read(pa, PMIC_ARB_DEBUG_RDATA(i));
+done:
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+	return rc;
+}
+
+static int pmic_arb_debug_write_cmd(struct spmi_controller *ctrl, u8 opc,
+				u8 sid, u16 addr, const u8 *buf, size_t len)
+{
+	struct spmi_pmic_arb_debug *pa = spmi_controller_get_drvdata(ctrl);
+	unsigned long flags;
+	int i, rc;
+
+	if (len > PMIC_ARB_MAX_TRANS_BYTES) {
+		dev_err(&ctrl->dev, "pmic-arb supports 1 to %d bytes per transaction, but %zu requested\n",
+			PMIC_ARB_MAX_TRANS_BYTES, len);
+		return -EINVAL;
+	}
+
+	/* Check the opcode */
+	if (opc >= 0x40 && opc <= 0x5F)
+		opc = PMIC_ARB_OP_WRITE;
+	else if (opc <= 0x0F)
+		opc = PMIC_ARB_OP_EXT_WRITE;
+	else if (opc >= 0x30 && opc <= 0x37)
+		opc = PMIC_ARB_OP_EXT_WRITEL;
+	else if (opc >= 0x80)
+		opc = PMIC_ARB_OP_ZERO_WRITE;
+	else
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&pa->lock, flags);
+
+	/* Write data to FIFO */
+	for (i = 0; i < len; i++)
+		pmic_arb_debug_write(pa, PMIC_ARB_DEBUG_WDATA(i), buf[i]);
+
+	rc = pmic_arb_debug_issue_command(ctrl, opc, sid, addr, len);
+
+	raw_spin_unlock_irqrestore(&pa->lock, flags);
+
+	return rc;
+}
+
+static int spmi_pmic_arb_debug_probe(struct platform_device *pdev)
+{
+	struct spmi_pmic_arb_debug *pa;
+	struct spmi_controller *ctrl;
+	struct resource *res;
+	int rc;
+	u32 fuse_val, fuse_bit;
+	void __iomem *fuse_addr;
+
+	/* Check if the debug bus is disabled by a fuse. */
+	rc = of_property_read_u32(pdev->dev.of_node, "qcom,fuse-disable-bit",
+				  &fuse_bit);
+	if (!rc) {
+		if (fuse_bit > 31) {
+			dev_err(&pdev->dev, "qcom,fuse-disable-bit supports values 0 to 31, but %u specified\n",
+				fuse_bit);
+			return -EINVAL;
+		}
+
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+						   "fuse");
+		if (!res) {
+			dev_err(&pdev->dev, "fuse address not specified\n");
+			return -EINVAL;
+		}
+
+		fuse_addr = devm_ioremap_resource(&pdev->dev, res);
+		if (IS_ERR(fuse_addr))
+			return PTR_ERR(fuse_addr);
+
+		fuse_val = readl_relaxed(fuse_addr);
+		devm_iounmap(&pdev->dev, fuse_addr);
+
+		if (fuse_val & BIT(fuse_bit)) {
+			dev_err(&pdev->dev, "SPMI PMIC arbiter debug bus disabled by fuse\n");
+			return -ENODEV;
+		}
+	}
+
+	ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pa));
+	if (!ctrl)
+		return -ENOMEM;
+
+	pa = spmi_controller_get_drvdata(ctrl);
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core");
+	if (!res) {
+		dev_err(&pdev->dev, "core address not specified\n");
+		rc = -EINVAL;
+		goto err_put_ctrl;
+	}
+
+	pa->addr = devm_ioremap_resource(&ctrl->dev, res);
+	if (IS_ERR(pa->addr)) {
+		rc = PTR_ERR(pa->addr);
+		goto err_put_ctrl;
+	}
+
+	platform_set_drvdata(pdev, ctrl);
+	raw_spin_lock_init(&pa->lock);
+
+	ctrl->cmd = pmic_arb_debug_cmd;
+	ctrl->read_cmd = pmic_arb_debug_read_cmd;
+	ctrl->write_cmd = pmic_arb_debug_write_cmd;
+
+	rc = spmi_controller_add(ctrl);
+	if (rc)
+		goto err_put_ctrl;
+
+	dev_info(&ctrl->dev, "SPMI PMIC arbiter debug bus controller added\n");
+
+	return 0;
+
+err_put_ctrl:
+	spmi_controller_put(ctrl);
+	return rc;
+}
+
+static int spmi_pmic_arb_debug_remove(struct platform_device *pdev)
+{
+	struct spmi_controller *ctrl = platform_get_drvdata(pdev);
+
+	spmi_controller_remove(ctrl);
+	spmi_controller_put(ctrl);
+
+	return 0;
+}
+
+static const struct of_device_id spmi_pmic_arb_debug_match_table[] = {
+	{ .compatible = "qcom,spmi-pmic-arb-debug", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, spmi_pmic_arb_debug_match_table);
+
+static struct platform_driver spmi_pmic_arb_debug_driver = {
+	.probe		= spmi_pmic_arb_debug_probe,
+	.remove		= spmi_pmic_arb_debug_remove,
+	.driver		= {
+		.name	= "spmi_pmic_arb_debug",
+		.of_match_table = spmi_pmic_arb_debug_match_table,
+	},
+};
+
+static int __init spmi_pmic_arb_debug_init(void)
+{
+	return platform_driver_register(&spmi_pmic_arb_debug_driver);
+}
+arch_initcall(spmi_pmic_arb_debug_init);
+
+static void __exit spmi_pmic_arb_debug_exit(void)
+{
+	platform_driver_unregister(&spmi_pmic_arb_debug_driver);
+}
+module_exit(spmi_pmic_arb_debug_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spmi_pmic_arb_debug");
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index fabbe76..4d079cd 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -1938,6 +1938,11 @@
 		atmel_uart_writel(port, ATMEL_PDC_TCR, 0);
 		atmel_port->pdc_tx.ofs = 0;
 	}
+	/*
+	 * in uart_flush_buffer(), the xmit circular buffer has just
+	 * been cleared, so we have to reset tx_len accordingly.
+	 */
+	atmel_port->tx_len = 0;
 }
 
 /*
@@ -2471,6 +2476,9 @@
 	pdc_tx = atmel_uart_readl(port, ATMEL_PDC_PTSR) & ATMEL_PDC_TXTEN;
 	atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
 
+	/* Make sure that tx path is actually able to send characters */
+	atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
+
 	uart_console_write(port, s, count, atmel_console_putchar);
 
 	/*
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 7c4654c..5b48323 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -92,7 +92,7 @@
 #define UART_OVERSAMPLING	(32)
 #define STALE_TIMEOUT		(16)
 #define GENI_UART_NR_PORTS	(15)
-#define DEF_FIFO_DEPTH_WORDS	(64)
+#define DEF_FIFO_DEPTH_WORDS	(16)
 #define DEF_FIFO_WIDTH_BITS	(32)
 
 struct msm_geni_serial_port {
@@ -210,8 +210,8 @@
 	unsigned int reg;
 	bool met = false;
 
-	while (iter < 100) {
-		reg = geni_read_reg(uport->membase, offset);
+	while (iter < 1000) {
+		reg = geni_read_reg_nolog(uport->membase, offset);
 		if (reg & bit_field) {
 			met = true;
 			break;
@@ -225,7 +225,7 @@
 static void msm_geni_serial_setup_tx(struct uart_port *uport,
 					unsigned int xmit_size)
 {
-	geni_write_reg(xmit_size, uport->membase, SE_UART_TX_TRANS_LEN);
+	geni_write_reg_nolog(xmit_size, uport->membase, SE_UART_TX_TRANS_LEN);
 	geni_setup_m_cmd(uport->membase, UART_START_TX, 0);
 	/*
 	 * Writes to enable the primary sequencer should go through before
@@ -252,7 +252,7 @@
 								M_CMD_ABORT_EN);
 		}
 	}
-	geni_write_reg(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
 }
 
 #ifdef CONFIG_CONSOLE_POLL
@@ -268,10 +268,12 @@
 		return -ENXIO;
 	}
 
-	m_irq_status = geni_read_reg(uport->membase, SE_GENI_M_IRQ_STATUS);
-	s_irq_status = geni_read_reg(uport->membase, SE_GENI_S_IRQ_STATUS);
-	geni_write_reg(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
-	geni_write_reg(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+	m_irq_status = geni_read_reg_nolog(uport->membase,
+						SE_GENI_M_IRQ_STATUS);
+	s_irq_status = geni_read_reg_nolog(uport->membase,
+						SE_GENI_S_IRQ_STATUS);
+	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
 
 	if (!(msm_geni_serial_poll_bit(uport, SE_GENI_RX_FIFO_STATUS,
 			RX_FIFO_WC_MSK))) {
@@ -284,7 +286,7 @@
 	 * getting valid RX fifo status.
 	 */
 	mb();
-	rx_fifo = geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
+	rx_fifo = geni_read_reg_nolog(uport->membase, SE_GENI_RX_FIFOn);
 	rx_fifo &= 0xFF;
 	return rx_fifo;
 }
@@ -296,14 +298,14 @@
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
 	se_config_packing(uport->membase, 8, 1, false);
-	geni_write_reg(port->tx_wm, uport->membase,
+	geni_write_reg_nolog(port->tx_wm, uport->membase,
 					SE_GENI_TX_WATERMARK_REG);
 	msm_geni_serial_setup_tx(uport, 1);
 	if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
 				M_TX_FIFO_WATERMARK_EN))
 		WARN_ON(1);
-	geni_write_reg(b, uport->membase, SE_GENI_TX_FIFOn);
-	geni_write_reg(M_TX_FIFO_WATERMARK_EN, uport->membase,
+	geni_write_reg_nolog(b, uport->membase, SE_GENI_TX_FIFOn);
+	geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
 							SE_GENI_M_IRQ_CLEAR);
 	/*
 	 * Ensure FIFO write goes through before polling for status.
@@ -316,7 +318,7 @@
 #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 static void msm_geni_serial_wr_char(struct uart_port *uport, int ch)
 {
-	geni_write_reg(ch, uport->membase, SE_GENI_TX_FIFOn);
+	geni_write_reg_nolog(ch, uport->membase, SE_GENI_TX_FIFOn);
 	/*
 	 * Ensure FIFO write clear goes through before
 	 * next iteration.
@@ -341,7 +343,7 @@
 
 	bytes_to_send += new_line;
 	se_config_packing(uport->membase, 8, 1, false);
-	geni_write_reg(port->tx_wm, uport->membase,
+	geni_write_reg_nolog(port->tx_wm, uport->membase,
 					SE_GENI_TX_WATERMARK_REG);
 	msm_geni_serial_setup_tx(uport, bytes_to_send);
 	i = 0;
@@ -350,7 +352,7 @@
 		u32 avail_fifo_bytes = (port->tx_fifo_depth - port->tx_wm);
 
 		while (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
-							M_TX_FIFO_WATERMARK_EN))
+						M_TX_FIFO_WATERMARK_EN))
 			cpu_relax();
 		chars_to_write = min((unsigned int)(count - i),
 							avail_fifo_bytes);
@@ -358,8 +360,10 @@
 			chars_to_write = (avail_fifo_bytes >> 1);
 		uart_console_write(uport, (s + i), chars_to_write,
 						msm_geni_serial_wr_char);
-		geni_write_reg(M_TX_FIFO_WATERMARK_EN, uport->membase,
+		geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
 							SE_GENI_M_IRQ_CLEAR);
+		/* Ensure this goes through before polling for WM IRQ again.*/
+		mb();
 		i += chars_to_write;
 	}
 	msm_geni_serial_poll_cancel_tx(uport);
@@ -401,7 +405,7 @@
 		int bytes = 4;
 
 		*(msm_port->rx_fifo) =
-			geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
+			geni_read_reg_nolog(uport->membase, SE_GENI_RX_FIFOn);
 		rx_char = (unsigned char *)msm_port->rx_fifo;
 
 		if (i == (rx_fifo_wc - 1)) {
@@ -437,12 +441,13 @@
 	unsigned int geni_m_irq_en;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	geni_m_irq_en = geni_read_reg(uport->membase, SE_GENI_M_IRQ_EN);
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	geni_m_irq_en |= M_TX_FIFO_WATERMARK_EN;
 
 	se_config_packing(uport->membase, 8, 4, false);
-	geni_write_reg(port->tx_wm, uport->membase, SE_GENI_TX_WATERMARK_REG);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(port->tx_wm, uport->membase,
+						SE_GENI_TX_WATERMARK_REG);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 	/* Geni command setup/irq enables should complete before returning.*/
 	mb();
 }
@@ -452,12 +457,12 @@
 	unsigned int geni_m_irq_en;
 	unsigned int geni_status;
 
-	geni_m_irq_en = geni_read_reg(uport->membase, SE_GENI_M_IRQ_EN);
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
-	geni_write_reg(0, uport->membase, SE_GENI_TX_WATERMARK_REG);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(0, uport->membase, SE_GENI_TX_WATERMARK_REG);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 
-	geni_status = geni_read_reg(uport->membase,
+	geni_status = geni_read_reg_nolog(uport->membase,
 						SE_GENI_STATUS);
 	/* Possible stop tx is called multiple times. */
 	if (!(geni_status & M_GENI_CMD_ACTIVE))
@@ -469,10 +474,10 @@
 		geni_abort_m_cmd(uport->membase);
 		msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
 							M_CMD_ABORT_EN);
-		geni_write_reg(M_CMD_ABORT_EN, uport->membase,
+		geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
 							SE_GENI_M_IRQ_CLEAR);
 	}
-	geni_write_reg(M_CMD_CANCEL_EN, uport, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase,
+						SE_GENI_M_IRQ_CLEAR);
 }
 
 static void msm_geni_serial_start_rx(struct uart_port *uport)
@@ -480,16 +485,16 @@
 	unsigned int geni_s_irq_en;
 	unsigned int geni_m_irq_en;
 
-	geni_s_irq_en = geni_read_reg(uport->membase,
+	geni_s_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_EN);
-	geni_m_irq_en = geni_read_reg(uport->membase,
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_M_IRQ_EN);
 	geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
 	geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
 
 	geni_setup_s_cmd(uport->membase, UART_START_READ, 0);
-	geni_write_reg(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 	/*
 	 * Ensure the writes to the secondary sequencer and interrupt enables
 	 * go through.
@@ -503,21 +508,21 @@
 	unsigned int geni_m_irq_en;
 	unsigned int geni_status;
 
-	geni_s_irq_en = geni_read_reg(uport->membase,
+	geni_s_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_EN);
-	geni_m_irq_en = geni_read_reg(uport->membase,
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_M_IRQ_EN);
 	geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
 	geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
 
-	geni_write_reg(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 
-	geni_status = geni_read_reg(uport->membase, SE_GENI_STATUS);
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
 	/* Possible stop rx is called multiple times. */
 	if (!(geni_status & S_GENI_CMD_ACTIVE))
 		return;
-	geni_write_reg(S_GENI_CMD_CANCEL, uport->membase,
+	geni_write_reg_nolog(S_GENI_CMD_CANCEL, uport->membase,
 						SE_GENI_S_CMD_CTRL_REG);
 	if (!msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
 							S_CMD_CANCEL_EN))
@@ -566,7 +571,7 @@
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 
 	tport = &uport->state->port;
-	rx_fifo_status = geni_read_reg(uport->membase,
+	rx_fifo_status = geni_read_reg_nolog(uport->membase,
 				SE_GENI_RX_FIFO_STATUS);
 	rx_fifo_wc = rx_fifo_status & RX_FIFO_WC_MSK;
 	rx_last_byte_valid = ((rx_fifo_status & RX_LAST_BYTE_VALID_MSK) >>
@@ -590,7 +595,7 @@
 	unsigned int xmit_size;
 	unsigned int fifo_width_bytes = msm_port->tx_fifo_width >> 3;
 
-	tx_fifo_status = geni_read_reg(uport->membase,
+	tx_fifo_status = geni_read_reg_nolog(uport->membase,
 					SE_GENI_TX_FIFO_STATUS);
 	if (uart_circ_empty(xmit) && !tx_fifo_status) {
 		msm_geni_serial_stop_tx(uport);
@@ -621,7 +626,7 @@
 
 		for (c = 0; c < tx_bytes ; c++)
 			buf |= (xmit->buf[xmit->tail + c] << (c * 8));
-		geni_write_reg(buf, uport->membase, SE_GENI_TX_FIFOn);
+		geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn);
 		xmit->tail = (xmit->tail + tx_bytes) & (UART_XMIT_SIZE - 1);
 		i += tx_bytes;
 		uport->icount.tx += tx_bytes;
@@ -642,10 +647,12 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&uport->lock, flags);
-	m_irq_status = geni_read_reg(uport->membase, SE_GENI_M_IRQ_STATUS);
-	s_irq_status = geni_read_reg(uport->membase, SE_GENI_S_IRQ_STATUS);
-	geni_write_reg(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
-	geni_write_reg(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+	m_irq_status = geni_read_reg_nolog(uport->membase,
+							SE_GENI_M_IRQ_STATUS);
+	s_irq_status = geni_read_reg_nolog(uport->membase,
+							SE_GENI_S_IRQ_STATUS);
+	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
 
 	if ((m_irq_status & M_ILLEGAL_CMD_EN)) {
 		WARN_ON(1);
@@ -814,17 +821,24 @@
 		u32 rx_parity_cfg, u32 bits_per_char, u32 stop_bit_len,
 		u32 rxstale, u32 s_clk_cfg)
 {
-	geni_write_reg(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
-	geni_write_reg(tx_trans_cfg, uport->membase, SE_UART_TX_TRANS_CFG);
-	geni_write_reg(tx_parity_cfg, uport->membase, SE_UART_TX_PARITY_CFG);
-	geni_write_reg(rx_trans_cfg, uport->membase, SE_UART_RX_TRANS_CFG);
-	geni_write_reg(rx_parity_cfg, uport->membase, SE_UART_RX_PARITY_CFG);
-	geni_write_reg(bits_per_char, uport->membase, SE_UART_TX_WORD_LEN);
-	geni_write_reg(bits_per_char, uport->membase, SE_UART_RX_WORD_LEN);
-	geni_write_reg(stop_bit_len, uport->membase, SE_UART_TX_STOP_BIT_LEN);
-	geni_write_reg(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
-	geni_write_reg(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
-	geni_write_reg(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
+	geni_write_reg_nolog(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
+	geni_write_reg_nolog(tx_trans_cfg, uport->membase,
+							SE_UART_TX_TRANS_CFG);
+	geni_write_reg_nolog(tx_parity_cfg, uport->membase,
+							SE_UART_TX_PARITY_CFG);
+	geni_write_reg_nolog(rx_trans_cfg, uport->membase,
+							SE_UART_RX_TRANS_CFG);
+	geni_write_reg_nolog(rx_parity_cfg, uport->membase,
+							SE_UART_RX_PARITY_CFG);
+	geni_write_reg_nolog(bits_per_char, uport->membase,
+							SE_UART_TX_WORD_LEN);
+	geni_write_reg_nolog(bits_per_char, uport->membase,
+							SE_UART_RX_WORD_LEN);
+	geni_write_reg_nolog(stop_bit_len, uport->membase,
+						SE_UART_TX_STOP_BIT_LEN);
+	geni_write_reg_nolog(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
+	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
 }
 
 static int get_clk_div_rate(unsigned int baud, unsigned long *desired_clk_rate)
@@ -843,6 +857,7 @@
 	}
 
 	clk_div = ser_clk / *desired_clk_rate;
+	*desired_clk_rate = ser_clk;
 exit_get_clk_div_rate:
 	return clk_div;
 }
@@ -875,10 +890,14 @@
 	ser_clk_cfg |= (clk_div << CLK_DIV_SHFT);
 
 	/* parity */
-	tx_trans_cfg = geni_read_reg(uport->membase, SE_UART_TX_TRANS_CFG);
-	tx_parity_cfg = geni_read_reg(uport->membase, SE_UART_TX_PARITY_CFG);
-	rx_trans_cfg = geni_read_reg(uport->membase, SE_UART_RX_TRANS_CFG);
-	rx_parity_cfg = geni_read_reg(uport->membase, SE_UART_RX_PARITY_CFG);
+	tx_trans_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_TX_TRANS_CFG);
+	tx_parity_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_TX_PARITY_CFG);
+	rx_trans_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_RX_TRANS_CFG);
+	rx_parity_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_RX_PARITY_CFG);
 	if (termios->c_cflag & PARENB) {
 		tx_trans_cfg |= UART_TX_PAR_EN;
 		rx_trans_cfg |= UART_RX_PAR_EN;
@@ -947,7 +966,8 @@
 	unsigned int tx_fifo_status;
 	unsigned int is_tx_empty = 1;
 
-	tx_fifo_status = geni_read_reg(port->membase, SE_GENI_TX_FIFO_STATUS);
+	tx_fifo_status = geni_read_reg_nolog(port->membase,
+						SE_GENI_TX_FIFO_STATUS);
 	if (tx_fifo_status)
 		is_tx_empty = 0;
 
@@ -1264,9 +1284,6 @@
 		goto exit_geni_serial_probe;
 	}
 
-	/* Default core clk to 115200 Baud */
-	clk_set_rate(dev_port->serial_rsc.se_clk, (115200 * UART_OVERSAMPLING));
-	uport->uartclk = clk_get_rate(dev_port->serial_rsc.se_clk);
 	dev_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
 	dev_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
 	dev_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c
index 770454e..07390f8 100644
--- a/drivers/tty/serial/mxs-auart.c
+++ b/drivers/tty/serial/mxs-auart.c
@@ -1085,7 +1085,7 @@
 					AUART_LINECTRL_BAUD_DIV_MAX);
 		baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN;
 		baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max);
-		div = u->uartclk * 32 / baud;
+		div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud);
 	}
 
 	ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F);
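
The mxs-auart divisor change above replaces truncating division with
round-to-nearest, which roughly halves the worst-case baud-rate error.  A
small stand-alone sketch (the macro matches the kernel's DIV_ROUND_CLOSEST()
for non-negative operands; the clock value is only illustrative):

#include <stdio.h>

/* Round-to-nearest division, as the kernel's DIV_ROUND_CLOSEST() computes
 * for non-negative operands. */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

int main(void)
{
	unsigned int uartclk = 24000000;	/* illustrative 24 MHz ref clock */
	unsigned int baud    = 115200;
	unsigned int old_div = uartclk * 32 / baud;			/* 6666 */
	unsigned int new_div = DIV_ROUND_CLOSEST(uartclk * 32, baud);	/* 6667 */

	/* effective rate = uartclk * 32 / div: 115211 vs. 115194 baud */
	printf("truncated div=%u -> %u baud\n", old_div, uartclk * 32 / old_div);
	printf("closest   div=%u -> %u baud\n", new_div, uartclk * 32 / new_div);
	return 0;
}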
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index cf25708..ff45ebf 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -520,8 +520,10 @@
 	 */
 	tbuf_size =  max_t(u16, sizeof(struct usb_hub_descriptor), wLength);
 	tbuf = kzalloc(tbuf_size, GFP_KERNEL);
-	if (!tbuf)
-		return -ENOMEM;
+	if (!tbuf) {
+		status = -ENOMEM;
+		goto err_alloc;
+	}
 
 	bufp = tbuf;
 
@@ -734,6 +736,7 @@
 	}
 
 	kfree(tbuf);
+ err_alloc:
 
 	/* any errors get returned through the urb completion */
 	spin_lock_irq(&hcd_root_hub_lock);
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 33e3d9f..3e459b0 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1064,56 +1064,17 @@
 
 int dwc3_core_pre_init(struct dwc3 *dwc)
 {
-	int ret;
+	int ret = 0;
 
 	dwc3_cache_hwparams(dwc);
-
-	ret = dwc3_phy_setup(dwc);
-	if (ret)
-		goto err0;
-
 	if (!dwc->ev_buf) {
 		ret = dwc3_alloc_event_buffers(dwc, DWC3_EVENT_BUFFERS_SIZE);
 		if (ret) {
 			dev_err(dwc->dev, "failed to allocate event buffers\n");
 			ret = -ENOMEM;
-			goto err1;
 		}
 	}
 
-	ret = dwc3_core_init(dwc);
-	if (ret) {
-		dev_err(dwc->dev, "failed to initialize core\n");
-		goto err2;
-	}
-
-	ret = phy_power_on(dwc->usb2_generic_phy);
-	if (ret < 0)
-		goto err3;
-
-	ret = phy_power_on(dwc->usb3_generic_phy);
-	if (ret < 0)
-		goto err4;
-
-	ret = dwc3_event_buffers_setup(dwc);
-	if (ret) {
-		dev_err(dwc->dev, "failed to setup event buffers\n");
-		goto err5;
-	}
-
-	return ret;
-
-err5:
-	phy_power_off(dwc->usb3_generic_phy);
-err4:
-	phy_power_off(dwc->usb2_generic_phy);
-err3:
-	dwc3_core_exit(dwc);
-err2:
-	dwc3_free_event_buffers(dwc);
-err1:
-	dwc3_ulpi_exit(dwc);
-err0:
 	return ret;
 }
 
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 96684f4..f601d76 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -2371,9 +2371,47 @@
 {
 	struct dwc3_msm *mdwc = container_of(w, struct dwc3_msm, resume_work);
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+	union extcon_property_value val;
+	unsigned int extcon_id;
+	struct extcon_dev *edev = NULL;
+	int ret = 0;
 
 	dev_dbg(mdwc->dev, "%s: dwc3 resume work\n", __func__);
 
+	if (mdwc->vbus_active) {
+		edev = mdwc->extcon_vbus;
+		extcon_id = EXTCON_USB;
+	} else if (mdwc->id_state == DWC3_ID_GROUND) {
+		edev = mdwc->extcon_id;
+		extcon_id = EXTCON_USB_HOST;
+	}
+
+	/* Check speed and Type-C polarity values in order to configure PHY */
+	if (edev && extcon_get_state(edev, extcon_id)) {
+		ret = extcon_get_property(edev, extcon_id,
+					EXTCON_PROP_USB_SS, &val);
+
+		/* Use default dwc->maximum_speed if speed isn't reported */
+		if (!ret)
+			dwc->maximum_speed = (val.intval == 0) ?
+					USB_SPEED_HIGH : USB_SPEED_SUPER;
+
+		if (dwc->maximum_speed > dwc->max_hw_supp_speed)
+			dwc->maximum_speed = dwc->max_hw_supp_speed;
+
+		dbg_event(0xFF, "speed", dwc->maximum_speed);
+
+		ret = extcon_get_property(edev, extcon_id,
+				EXTCON_PROP_USB_TYPEC_POLARITY, &val);
+		if (ret)
+			mdwc->typec_orientation = ORIENTATION_NONE;
+		else
+			mdwc->typec_orientation = val.intval ?
+					ORIENTATION_CC2 : ORIENTATION_CC1;
+
+		dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+	}
+
 	/*
 	 * exit LPM first to meet resume timeline from device side.
 	 * resume_pending flag would prevent calling
@@ -2617,45 +2655,18 @@
 {
 	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
-	struct extcon_dev *edev = ptr;
 	enum dwc3_id_state id;
-	int cc_state;
-	int speed;
-
-	if (!edev) {
-		dev_err(mdwc->dev, "%s: edev null\n", __func__);
-		goto done;
-	}
 
 	id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
 
 	dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
 
-	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
-	if (cc_state < 0)
-		mdwc->typec_orientation = ORIENTATION_NONE;
-	else
-		mdwc->typec_orientation =
-			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
-
-	dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
-
-	speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
-	/* Use default dwc->maximum_speed if extcon doesn't report speed. */
-	if (speed >= 0)
-		dwc->maximum_speed =
-			(speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
-
-	if (dwc->maximum_speed > dwc->max_hw_supp_speed)
-		dwc->maximum_speed = dwc->max_hw_supp_speed;
-
 	if (mdwc->id_state != id) {
 		mdwc->id_state = id;
 		dbg_event(0xFF, "id_state", mdwc->id_state);
 		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
 	}
 
-done:
 	return NOTIFY_DONE;
 }
 
@@ -2664,44 +2675,19 @@
 {
 	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
-	struct extcon_dev *edev = ptr;
-	int cc_state;
-	int speed;
-
-	if (!edev) {
-		dev_err(mdwc->dev, "%s: edev null\n", __func__);
-		goto done;
-	}
 
 	dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
 
 	if (mdwc->vbus_active == event)
 		return NOTIFY_DONE;
 
-	cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
-	if (cc_state < 0)
-		mdwc->typec_orientation = ORIENTATION_NONE;
-	else
-		mdwc->typec_orientation =
-			cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
-
-	dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
-
-	speed = extcon_get_cable_state_(edev, EXTCON_USB_SPEED);
-	/* Use default dwc->maximum_speed if extcon doesn't report speed. */
-	if (speed >= 0)
-		dwc->maximum_speed =
-			(speed == 0) ? USB_SPEED_HIGH : USB_SPEED_SUPER;
-
-	if (dwc->maximum_speed > dwc->max_hw_supp_speed)
-		dwc->maximum_speed = dwc->max_hw_supp_speed;
-
 	mdwc->vbus_active = event;
 	if (dwc->is_drd && !mdwc->in_restart)
 		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
-done:
+
 	return NOTIFY_DONE;
 }
+
 /*
  * Handle EUD based soft detach/attach event, and force USB high speed mode
  * functionality on receiving soft attach event.
@@ -2717,12 +2703,6 @@
 {
 	struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, eud_event_nb);
 	struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
-	struct extcon_dev *edev = ptr;
-
-	if (!edev) {
-		dev_err(mdwc->dev, "%s: edev null\n", __func__);
-		goto done;
-	}
 
 	dbg_event(0xFF, "EUD_NB", event);
 	dev_dbg(mdwc->dev, "eud:%ld event received\n", event);
@@ -2735,7 +2715,7 @@
 	mdwc->vbus_active = event;
 	if (dwc->is_drd && !mdwc->in_restart)
 		queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
-done:
+
 	return NOTIFY_DONE;
 }
 
@@ -3276,10 +3256,10 @@
 	}
 
 	/* Update initial VBUS/ID state from extcon */
-	if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
+	if (mdwc->extcon_vbus && extcon_get_state(mdwc->extcon_vbus,
 							EXTCON_USB))
 		dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
-	else if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
+	else if (mdwc->extcon_id && extcon_get_state(mdwc->extcon_id,
 							EXTCON_USB_HOST))
 		dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
 	else if (!pval.intval) {
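
The dwc3-msm hunks above move the Type-C polarity and speed queries out of
the extcon notifiers and into the resume worker, using the extcon property
API.  A condensed sketch of the consumer-side pattern (usb_is_superspeed()
is a hypothetical helper; the high-speed fallback mirrors the default used
above):

#include <linux/extcon.h>

/* Read a property only while a cable is attached; providers that never set
 * the property return an error, in which case fall back to a default. */
static bool usb_is_superspeed(struct extcon_dev *edev, unsigned int id)
{
	union extcon_property_value val;

	if (extcon_get_state(edev, id) <= 0)
		return false;	/* detached (or error) */

	if (extcon_get_property(edev, id, EXTCON_PROP_USB_SS, &val))
		return false;	/* not reported: assume high speed */

	return val.intval != 0;
}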
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index e55ebcb4..54e14b1 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -450,13 +450,23 @@
 	struct fsg_buffhd	*bh = req->context;
 
 	if (req->status || req->actual != req->length)
-		DBG(common, "%s --> %d, %u/%u\n", __func__,
+		pr_debug("%s --> %d, %u/%u\n", __func__,
 		    req->status, req->actual, req->length);
 	if (req->status == -ECONNRESET)		/* Request was cancelled */
 		usb_ep_fifo_flush(ep);
 
 	/* Hold the lock while we update the request and buffer states */
 	smp_wmb();
+	/*
+	 * Disconnect and completion might race with each other, and driver
+	 * data is set to NULL during ep disable. So, check for that case.
+	 */
+	if (!common) {
+		bh->inreq_busy = 0;
+		bh->state = BUF_STATE_EMPTY;
+		return;
+	}
+
 	spin_lock(&common->lock);
 	bh->inreq_busy = 0;
 	bh->state = BUF_STATE_EMPTY;
@@ -469,15 +479,24 @@
 	struct fsg_common	*common = ep->driver_data;
 	struct fsg_buffhd	*bh = req->context;
 
-	dump_msg(common, "bulk-out", req->buf, req->actual);
 	if (req->status || req->actual != bh->bulk_out_intended_length)
-		DBG(common, "%s --> %d, %u/%u\n", __func__,
+		pr_debug("%s --> %d, %u/%u\n", __func__,
 		    req->status, req->actual, bh->bulk_out_intended_length);
 	if (req->status == -ECONNRESET)		/* Request was cancelled */
 		usb_ep_fifo_flush(ep);
 
 	/* Hold the lock while we update the request and buffer states */
 	smp_wmb();
+	/*
+	 * Disconnect and completion might race with each other, and driver
+	 * data is set to NULL during ep disable. So, check for that case.
+	 */
+	if (!common) {
+		bh->outreq_busy = 0;
+		return;
+	}
+
+	dump_msg(common, "bulk-out", req->buf, req->actual);
 	spin_lock(&common->lock);
 	bh->outreq_busy = 0;
 	bh->state = BUF_STATE_FULL;
@@ -2271,8 +2290,11 @@
 	}
 
 	common->running = 0;
-	if (!new_fsg || rc)
+	if (!new_fsg || rc) {
+		/* allow usb LPM after eps are disabled */
+		usb_gadget_autopm_put_async(common->gadget);
 		return rc;
+	}
 
 	common->fsg = new_fsg;
 	fsg = common->fsg;
@@ -2330,6 +2352,10 @@
 {
 	struct fsg_dev *fsg = fsg_from_func(f);
 	fsg->common->new_fsg = fsg;
+
+	/* prevents usb LPM until thread runs to completion */
+	usb_gadget_autopm_get_async(fsg->common->gadget);
+
 	raise_exception(fsg->common, FSG_STATE_CONFIG_CHANGE);
 	return USB_GADGET_DELAYED_STATUS;
 }
@@ -2472,8 +2498,14 @@
 
 	case FSG_STATE_CONFIG_CHANGE:
 		do_set_interface(common, common->new_fsg);
-		if (common->new_fsg)
+		if (common->new_fsg) {
+			/*
+			 * Make sure the delayed_status flag is updated after
+			 * set_alt returns.
+			 */
+			msleep(200);
 			usb_composite_setup_continue(common->cdev);
+		}
 		break;
 
 	case FSG_STATE_EXIT:
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index fbe6910..217b7ca 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1238,7 +1238,7 @@
 	opts->func_inst.free_func_inst = f_midi_free_inst;
 	opts->index = SNDRV_DEFAULT_IDX1;
 	opts->id = SNDRV_DEFAULT_STR1;
-	opts->buflen = 512;
+	opts->buflen = 1024;
 	opts->qlen = 32;
 	opts->in_ports = 1;
 	opts->out_ports = 1;
diff --git a/drivers/usb/gadget/function/f_qdss.c b/drivers/usb/gadget/function/f_qdss.c
index 7114784..17f6f60 100644
--- a/drivers/usb/gadget/function/f_qdss.c
+++ b/drivers/usb/gadget/function/f_qdss.c
@@ -493,11 +493,7 @@
 				NULL,
 				NULL);
 
-		status = set_qdss_data_connection(
-				qdss->gadget,
-				qdss->port.data,
-				qdss->port.data->address,
-				0);
+		status = set_qdss_data_connection(qdss, 0);
 		if (status)
 			pr_err("qdss_disconnect error");
 	}
@@ -543,11 +539,7 @@
 	}
 
 	pr_debug("usb_qdss_connect_work\n");
-	status = set_qdss_data_connection(
-			qdss->gadget,
-			qdss->port.data,
-			qdss->port.data->address,
-			1);
+	status = set_qdss_data_connection(qdss, 1);
 	if (status) {
 		pr_err("set_qdss_data_connection error(%d)", status);
 		return;
@@ -868,14 +860,9 @@
 	if (status)
 		pr_err("%s: uninit_data error\n", __func__);
 
-	status = set_qdss_data_connection(
-				gadget,
-				qdss->port.data,
-				qdss->port.data->address,
-				0);
+	status = set_qdss_data_connection(qdss, 0);
 	if (status)
 		pr_err("%s:qdss_disconnect error\n", __func__);
-	usb_gadget_restart(gadget);
 }
 EXPORT_SYMBOL(usb_qdss_close);
 
diff --git a/drivers/usb/gadget/function/f_qdss.h b/drivers/usb/gadget/function/f_qdss.h
index e673e61..4ba2e9b 100644
--- a/drivers/usb/gadget/function/f_qdss.h
+++ b/drivers/usb/gadget/function/f_qdss.h
@@ -72,6 +72,5 @@
 };
 
 int uninit_data(struct usb_ep *ep);
-int set_qdss_data_connection(struct usb_gadget *gadget,
-	struct usb_ep *data_ep, u8 data_addr, int enable);
+int set_qdss_data_connection(struct f_qdss *qdss, int enable);
 #endif
diff --git a/drivers/usb/gadget/function/rndis.c b/drivers/usb/gadget/function/rndis.c
index ed93f9d..38d58f3 100644
--- a/drivers/usb/gadget/function/rndis.c
+++ b/drivers/usb/gadget/function/rndis.c
@@ -538,14 +538,11 @@
 		 */
 		retval = 0;
 		if (*params->filter) {
-			params->state = RNDIS_DATA_INITIALIZED;
-			netif_carrier_on(params->dev);
-			if (netif_running(params->dev))
-				netif_wake_queue(params->dev);
+			pr_debug("%s(): disable flow control\n", __func__);
+			rndis_flow_control(params, false);
 		} else {
-			params->state = RNDIS_INITIALIZED;
-			netif_carrier_off(params->dev);
-			netif_stop_queue(params->dev);
+			pr_err("%s(): enable flow control\n", __func__);
+			rndis_flow_control(params, true);
 		}
 		break;
 
@@ -690,12 +687,6 @@
 {
 	rndis_reset_cmplt_type *resp;
 	rndis_resp_t *r;
-	u8 *xbuf;
-	u32 length;
-
-	/* drain the response queue */
-	while ((xbuf = rndis_get_next_response(params, &length)))
-		rndis_free_response(params, xbuf);
 
 	r = rndis_add_response(params, sizeof(rndis_reset_cmplt_type));
 	if (!r)
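
The RNDIS filter handling above is folded into rndis_flow_control(), whose
body is not part of this merge.  Judging from the lines removed, the helper
plausibly looks like the sketch below (inferred from the deleted code, not
the tree's actual implementation):

static void rndis_flow_control(struct rndis_params *params, bool enable)
{
	if (enable) {
		/* throttle: stop forwarding packets to the host */
		params->state = RNDIS_INITIALIZED;
		netif_carrier_off(params->dev);
		netif_stop_queue(params->dev);
	} else {
		/* unthrottle: resume normal data flow */
		params->state = RNDIS_DATA_INITIALIZED;
		netif_carrier_on(params->dev);
		if (netif_running(params->dev))
			netif_wake_queue(params->dev);
	}
}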
diff --git a/drivers/usb/gadget/function/u_qdss.c b/drivers/usb/gadget/function/u_qdss.c
index c781d85..06eecd1 100644
--- a/drivers/usb/gadget/function/u_qdss.c
+++ b/drivers/usb/gadget/function/u_qdss.c
@@ -40,19 +40,25 @@
 }
 
 static int init_data(struct usb_ep *ep);
-int set_qdss_data_connection(struct usb_gadget *gadget,
-	struct usb_ep *data_ep, u8 data_addr, int enable)
+int set_qdss_data_connection(struct f_qdss *qdss, int enable)
 {
 	enum usb_ctrl		usb_bam_type;
 	int			res = 0;
 	int			idx;
-	struct f_qdss *qdss = data_ep->driver_data;
-	struct usb_qdss_bam_connect_info bam_info = qdss->bam_info;
+	struct usb_qdss_bam_connect_info bam_info;
+	struct usb_gadget *gadget;
 
 	pr_debug("set_qdss_data_connection\n");
 
+	if (!qdss) {
+		pr_err("%s: qdss ptr is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	gadget = qdss->gadget;
 	usb_bam_type = usb_bam_get_bam_type(gadget->name);
 
+	bam_info = qdss->bam_info;
 	/* There is only one qdss pipe, so the pipe number can be set to 0 */
 	idx = usb_bam_get_connection_idx(usb_bam_type, QDSS_P_BAM,
 		PEER_PERIPHERAL_TO_USB, USB_BAM_DEVICE, 0);
@@ -67,14 +73,16 @@
 			kzalloc(sizeof(struct sps_mem_buffer), GFP_KERNEL);
 		if (!bam_info.data_fifo) {
 			pr_err("qdss_data_connection: memory alloc failed\n");
+			usb_bam_free_fifos(usb_bam_type, idx);
 			return -ENOMEM;
 		}
 		get_bam2bam_connection_info(usb_bam_type, idx,
 				&bam_info.usb_bam_pipe_idx,
 				NULL, bam_info.data_fifo, NULL);
 
-		alloc_sps_req(data_ep);
-		msm_data_fifo_config(data_ep, bam_info.data_fifo->phys_base,
+		alloc_sps_req(qdss->port.data);
+		msm_data_fifo_config(qdss->port.data,
+					bam_info.data_fifo->phys_base,
 					bam_info.data_fifo->size,
 					bam_info.usb_bam_pipe_idx);
 		init_data(qdss->port.data);
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 374750f..b59efd2 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1507,6 +1507,11 @@
 	/* Some devices get this wrong */
 	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
 		max_packet = 512;
+
+	if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_FULL
+				&& max_packet < 8)
+		max_packet = 8;
+
 	/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
 	if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
 		avg_trb_len = 8;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index fa1323b..56bb4b1 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -329,6 +329,8 @@
 	if (ret)
 		goto disable_usb_phy;
 
+	device_wakeup_enable(&hcd->self.root_hub->dev);
+
 	if (HCC_MAX_PSA(xhci->hcc_params) >= 4)
 		xhci->shared_hcd->can_do_streams = 1;
 
@@ -336,6 +338,8 @@
 	if (ret)
 		goto dealloc_usb2_hcd;
 
+	device_wakeup_enable(&xhci->shared_hcd->self.root_hub->dev);
+
 	/* override imod interval if specified */
 	if (imod) {
 		imod &= ER_IRQ_INTERVAL_MASK;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index e6e985d..ec9ff3e 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -113,12 +113,21 @@
 
 	ret = xhci_handshake(&xhci->op_regs->status,
 			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
-	if (!ret) {
+	if (!ret)
 		xhci->xhc_state |= XHCI_STATE_HALTED;
-		xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
-	} else
+	else
 		xhci_warn(xhci, "Host not halted after %u microseconds.\n",
 				XHCI_MAX_HALT_USEC);
+
+	xhci->cmd_ring_state = CMD_RING_STATE_STOPPED;
+
+	if (delayed_work_pending(&xhci->cmd_timer)) {
+		xhci_dbg_trace(xhci, trace_xhci_dbg_init,
+				"Cleanup command queue");
+		cancel_delayed_work(&xhci->cmd_timer);
+		xhci_cleanup_command_queue(xhci);
+	}
+
 	return ret;
 }
 
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index da08047..e19e963 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -373,8 +373,6 @@
 static const unsigned int usbpd_extcon_cable[] = {
 	EXTCON_USB,
 	EXTCON_USB_HOST,
-	EXTCON_USB_CC,
-	EXTCON_USB_SPEED,
 	EXTCON_NONE,
 };
 
@@ -397,32 +395,43 @@
 
 static inline void stop_usb_host(struct usbpd *pd)
 {
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 0);
+	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 0);
 }
 
 static inline void start_usb_host(struct usbpd *pd, bool ss)
 {
 	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+	union extcon_property_value val;
 
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
-			cc == ORIENTATION_CC2);
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, ss);
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 1);
+	val.intval = (cc == ORIENTATION_CC2);
+	extcon_set_property(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_TYPEC_POLARITY, val);
+
+	val.intval = ss;
+	extcon_set_property(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_SS, val);
+
+	extcon_set_state_sync(pd->extcon, EXTCON_USB_HOST, 1);
 }
 
 static inline void stop_usb_peripheral(struct usbpd *pd)
 {
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+	extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
 }
 
 static inline void start_usb_peripheral(struct usbpd *pd)
 {
 	enum plug_orientation cc = usbpd_get_plug_orientation(pd);
+	union extcon_property_value val;
 
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
-			cc == ORIENTATION_CC2);
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB_SPEED, 1);
-	extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
+	val.intval = (cc == ORIENTATION_CC2);
+	extcon_set_property(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_TYPEC_POLARITY, val);
+
+	val.intval = 1;
+	extcon_set_property(pd->extcon, EXTCON_USB, EXTCON_PROP_USB_SS, val);
+
+	extcon_set_state_sync(pd->extcon, EXTCON_USB, 1);
 }
 
 static int set_power_role(struct usbpd *pd, enum power_role pr)
@@ -1817,7 +1826,7 @@
 			regulator_disable(pd->vbus);
 
 		if (pd->current_dr != DR_DFP) {
-			extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+			extcon_set_state_sync(pd->extcon, EXTCON_USB, 0);
 			pd->current_dr = DR_DFP;
 			pd_phy_update_roles(pd->current_dr, pd->current_pr);
 		}
@@ -3207,6 +3216,16 @@
 		goto put_psy;
 	}
 
+	/* Support reporting polarity and speed via properties */
+	extcon_set_property_capability(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_TYPEC_POLARITY);
+	extcon_set_property_capability(pd->extcon, EXTCON_USB,
+			EXTCON_PROP_USB_SS);
+	extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_TYPEC_POLARITY);
+	extcon_set_property_capability(pd->extcon, EXTCON_USB_HOST,
+			EXTCON_PROP_USB_SS);
+
 	pd->vbus = devm_regulator_get(parent, "vbus");
 	if (IS_ERR(pd->vbus)) {
 		ret = PTR_ERR(pd->vbus);
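
The policy-engine hunks above show the provider side of the migration from
the cable-state API to the property API.  Ordering matters here:
extcon_set_property_capability() must run once at probe (as added before the
vbus regulator lookup above), and properties must be set before
extcon_set_state_sync() raises the state, since the state change is what
wakes consumers' notifiers.  A condensed sketch (report_usb_attach() is a
hypothetical helper):

#include <linux/extcon.h>

static void report_usb_attach(struct extcon_dev *edev, bool is_cc2,
			      bool superspeed)
{
	union extcon_property_value val;

	/* set properties first ... */
	val.intval = is_cc2;
	extcon_set_property(edev, EXTCON_USB,
			EXTCON_PROP_USB_TYPEC_POLARITY, val);

	val.intval = superspeed;
	extcon_set_property(edev, EXTCON_USB, EXTCON_PROP_USB_SS, val);

	/* ... then raise the state, which notifies consumers */
	extcon_set_state_sync(edev, EXTCON_USB, 1);
}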
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index c59e33f..4f0a455 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -50,6 +50,15 @@
 #define QUSB2PHY_PORT_TUNE1		0x23c
 #define QUSB2PHY_TEST1			0x24C
 
+#define QUSB2PHY_PLL_CORE_INPUT_OVERRIDE 0x0a8
+#define CORE_PLL_RATE			BIT(0)
+#define CORE_PLL_RATE_MUX		BIT(1)
+#define CORE_PLL_EN			BIT(2)
+#define CORE_PLL_EN_MUX			BIT(3)
+#define CORE_PLL_EN_FROM_RESET		BIT(4)
+#define CORE_RESET			BIT(5)
+#define CORE_RESET_MUX			BIT(6)
+
 #define QUSB2PHY_1P8_VOL_MIN           1800000 /* uV */
 #define QUSB2PHY_1P8_VOL_MAX           1800000 /* uV */
 #define QUSB2PHY_1P8_HPM_LOAD          30000   /* uA */
@@ -330,22 +339,30 @@
 	}
 }
 
+static void qusb_phy_reset(struct qusb_phy *qphy)
+{
+	int ret;
+
+	ret = reset_control_assert(qphy->phy_reset);
+	if (ret)
+		dev_err(qphy->phy.dev, "%s: phy_reset assert failed\n",
+								__func__);
+	usleep_range(100, 150);
+
+	ret = reset_control_deassert(qphy->phy_reset);
+	if (ret)
+		dev_err(qphy->phy.dev, "%s: phy_reset deassert failed\n",
+							__func__);
+}
+
 static void qusb_phy_host_init(struct usb_phy *phy)
 {
 	u8 reg;
-	int ret;
 	struct qusb_phy *qphy = container_of(phy, struct qusb_phy, phy);
 
 	dev_dbg(phy->dev, "%s\n", __func__);
 
-	/* Perform phy reset */
-	ret = reset_control_assert(qphy->phy_reset);
-	if (ret)
-		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
-	usleep_range(100, 150);
-	ret = reset_control_deassert(qphy->phy_reset);
-		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
-
+	qusb_phy_reset(qphy);
 	qusb_phy_write_seq(qphy->base, qphy->qusb_phy_host_init_seq,
 			qphy->host_init_seq_len, 0);
 
@@ -377,15 +394,7 @@
 
 	qusb_phy_enable_clocks(qphy, true);
 
-	/* Perform phy reset */
-	ret = reset_control_assert(qphy->phy_reset);
-	if (ret)
-		dev_err(phy->dev, "%s: phy_reset assert failed\n", __func__);
-	usleep_range(100, 150);
-	ret = reset_control_deassert(qphy->phy_reset);
-	if (ret)
-		dev_err(phy->dev, "%s: phy_reset deassert failed\n", __func__);
-
+	qusb_phy_reset(qphy);
 	if (qphy->emulation) {
 		if (qphy->emu_init_seq)
 			qusb_phy_write_seq(qphy->emu_phy_base + 0x8000,
@@ -537,6 +546,11 @@
 			writel_relaxed(intr_mask,
 				qphy->base + QUSB2PHY_INTR_CTRL);
 
+			/* hold core PLL into reset */
+			writel_relaxed(CORE_PLL_EN_FROM_RESET |
+				CORE_RESET | CORE_RESET_MUX,
+				qphy->base + QUSB2PHY_PLL_CORE_INPUT_OVERRIDE);
+
 			/* enable phy auto-resume */
 			writel_relaxed(0x91,
 					qphy->base + QUSB2PHY_TEST1);
@@ -555,14 +569,7 @@
 			/* Disable all interrupts */
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_INTR_CTRL);
-
-			/* Put PHY into non-driving mode */
-			writel_relaxed(0x23,
-				qphy->base + QUSB2PHY_PWR_CTRL1);
-
-			/* Makes sure that above write goes through */
-			wmb();
-
+			qusb_phy_reset(qphy);
 			qusb_phy_enable_clocks(qphy, false);
 			qusb_phy_enable_power(qphy, false, true);
 		}
@@ -576,6 +583,10 @@
 			writel_relaxed(0x00,
 				qphy->base + QUSB2PHY_INTR_CTRL);
 
+			/* bring core PLL out of reset */
+			writel_relaxed(CORE_PLL_EN_FROM_RESET,
+				qphy->base + QUSB2PHY_PLL_CORE_INPUT_OVERRIDE);
+
 			/* Makes sure that above write goes through */
 			wmb();
 		} else { /* Cable connect case */
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index ee521a0..9ef34c3 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -46,13 +46,39 @@
 #define ALFPS_DTCT_EN		BIT(1)
 #define ARCVR_DTCT_EVENT_SEL	BIT(4)
 
-/* PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL bits */
+/*
+ * Register bits below apply to both:
+ * PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL - for the QMP USB PHY
+ * USB3_DP_COM_PHY_MODE_CTRL - for the QMP USB DP combo PHY
+ */
 
 /* 0 - selects Lane A. 1 - selects Lane B */
 #define SW_PORTSELECT		BIT(0)
+/* port select mux: 1 - SW control, 0 - HW control */
 #define SW_PORTSELECT_MX	BIT(1)
 
+/* USB3_DP_PHY_USB3_DP_COM_SWI_CTRL bits */
+
+/* enable lane register read/write access from the USB3 side */
+#define USB3_SWI_ACT_ACCESS_EN	BIT(0)
+/* enable lane register read/write access from the DP side */
+#define DP_SWI_ACT_ACCESS_EN	BIT(1)
+
+/* USB3_DP_COM_RESET_OVRD_CTRL bits */
+
+/* DP PHY soft reset */
+#define SW_DPPHY_RESET		BIT(0)
+/* mux to select DP PHY reset control, 0:HW control, 1: software reset */
+#define SW_DPPHY_RESET_MUX	BIT(1)
+/* USB3 PHY soft reset */
+#define SW_USB3PHY_RESET	BIT(2)
+/* mux to select USB3 PHY reset control, 0:HW control, 1: software reset */
+#define SW_USB3PHY_RESET_MUX	BIT(3)
+
+/* USB3_DP_COM_PHY_MODE_CTRL bits */
+#define USB3_MODE		BIT(0) /* enables USB3 mode */
+#define DP_MODE			BIT(1) /* enables DP mode */
+
 enum qmp_phy_rev_reg {
 	USB3_PHY_PCS_STATUS,
 	USB3_PHY_AUTONOMOUS_MODE_CTRL,
@@ -60,6 +86,17 @@
 	USB3_PHY_POWER_DOWN_CONTROL,
 	USB3_PHY_SW_RESET,
 	USB3_PHY_START,
+
+	/* USB DP Combo PHY related */
+	USB3_DP_DP_PHY_PD_CTL,
+	USB3_DP_COM_POWER_DOWN_CTRL,
+	USB3_DP_COM_SW_RESET,
+	USB3_DP_COM_RESET_OVRD_CTRL,
+	USB3_DP_COM_PHY_MODE_CTRL,
+	USB3_DP_COM_TYPEC_CTRL,
+	USB3_DP_COM_SWI_CTRL,
+	USB3_PCS_MISC_CLAMP_ENABLE,
+	/* TypeC port select configuration (optional) */
 	USB3_PHY_PCS_MISC_TYPEC_CTRL,
 	USB3_PHY_REG_MAX,
 };
@@ -99,6 +136,8 @@
 	int			init_seq_len;
 	unsigned int		*qmp_phy_reg_offset;
 	int			reg_offset_cnt;
+
+	int			port_select;
 };
 
 static const struct of_device_id msm_usb_id_table[] = {
@@ -111,6 +150,9 @@
 	{
 		.compatible = "qcom,usb-ssphy-qmp-v2",
 	},
+	{
+		.compatible = "qcom,usb-ssphy-qmp-dp-combo",
+	},
 	{ },
 };
 MODULE_DEVICE_TABLE(of, msm_usb_id_table);
@@ -132,6 +174,21 @@
 			phy->phy_reg[USB3_PHY_LFPS_RXTERM_IRQ_CLEAR]);
 }
 
+static void msm_ssusb_qmp_clamp_enable(struct msm_ssphy_qmp *phy, bool val)
+{
+	switch (phy->phy.type) {
+	case USB_PHY_TYPE_USB3_DP:
+		writel_relaxed(!val, phy->base +
+			phy->phy_reg[USB3_PCS_MISC_CLAMP_ENABLE]);
+		break;
+	case USB_PHY_TYPE_USB3:
+		writel_relaxed(!!val, phy->vls_clamp_reg);
+		break;
+	default:
+		break;
+	}
+}
+
 static void msm_ssusb_qmp_enable_autonomous(struct msm_ssphy_qmp *phy,
 		int enable)
 {
@@ -152,11 +209,9 @@
 			val &= ~ARCVR_DTCT_EVENT_SEL;
 			writeb_relaxed(val, phy->base + autonomous_mode_offset);
 		}
-
-		/* clamp phy level shifter to perform autonomous detection */
-		writel_relaxed(0x1, phy->vls_clamp_reg);
+		msm_ssusb_qmp_clamp_enable(phy, true);
 	} else {
-		writel_relaxed(0x0, phy->vls_clamp_reg);
+		msm_ssusb_qmp_clamp_enable(phy, false);
 		writeb_relaxed(0, phy->base + autonomous_mode_offset);
 		msm_ssusb_qmp_clr_lfps_rxterm_int(phy);
 	}
@@ -273,12 +328,100 @@
 	return 0;
 }
 
+static void usb_qmp_update_portselect_phymode(struct msm_ssphy_qmp *phy)
+{
+	int val;
+
+	/* perform lane selection */
+	val = -EINVAL;
+	if (phy->phy.flags & PHY_LANE_A) {
+		val = SW_PORTSELECT_MX;
+		phy->port_select = PHY_LANE_A;
+	}
+
+	if (phy->phy.flags & PHY_LANE_B) {
+		val = SW_PORTSELECT | SW_PORTSELECT_MX;
+		phy->port_select = PHY_LANE_B;
+	}
+
+	switch (phy->phy.type) {
+	case USB_PHY_TYPE_USB3_DP:
+		/* override hardware control for reset of qmp phy */
+		writel_relaxed(SW_DPPHY_RESET_MUX | SW_DPPHY_RESET |
+			SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET,
+			phy->base + phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+
+		/* update port select */
+		if (val > 0) {
+			dev_err(phy->phy.dev,
+				"USB DP QMP PHY: Update TYPEC CTRL(%d)\n", val);
+			writel_relaxed(val, phy->base +
+				phy->phy_reg[USB3_DP_COM_TYPEC_CTRL]);
+		}
+
+		writel_relaxed(USB3_MODE | DP_MODE,
+			phy->base + phy->phy_reg[USB3_DP_COM_PHY_MODE_CTRL]);
+
+		/* activate register access of LANE for both USB3 and DP */
+		writel_relaxed(USB3_SWI_ACT_ACCESS_EN | DP_SWI_ACT_ACCESS_EN,
+			phy->base + phy->phy_reg[USB3_DP_COM_SWI_CTRL]);
+
+		/* bring both QMP USB and QMP DP PHYs PCS block out of reset */
+		writel_relaxed(0x00,
+			phy->base + phy->phy_reg[USB3_DP_COM_RESET_OVRD_CTRL]);
+		break;
+	case  USB_PHY_TYPE_USB3:
+		if (val > 0) {
+			dev_err(phy->phy.dev,
+				"USB QMP PHY: Update TYPEC CTRL(%d)\n", val);
+			writel_relaxed(val, phy->base +
+				phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
+		}
+		break;
+	default:
+		dev_err(phy->phy.dev, "portselect: Unknown USB QMP PHY type\n");
+		break;
+	}
+
+	/* Make sure the above selection and reset sequence went through */
+	mb();
+}
+
+static void usb_qmp_powerup_phy(struct msm_ssphy_qmp *phy)
+{
+	switch (phy->phy.type) {
+	case USB_PHY_TYPE_USB3_DP:
+		/* power up USB3 and DP common logic block */
+		writel_relaxed(0x01,
+			phy->base + phy->phy_reg[USB3_DP_COM_POWER_DOWN_CTRL]);
+
+		/*
+		 * Don't clear DP_COM_SW_RESET yet: the next operation updates
+		 * the PHY mode and port select, which requires DP_COM_SW_RESET
+		 * to remain asserted (0x1).
+		 */
+
+		/* intentional fall-through */
+	case USB_PHY_TYPE_USB3:
+		/* power up USB3 PHY */
+		writel_relaxed(0x01,
+			phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+		break;
+	default:
+		dev_err(phy->phy.dev, "phy_powerup: Unknown USB QMP PHY type\n");
+		break;
+	}
+
+	/* Make sure that above write completed to power up PHY */
+	mb();
+}
+
 /* SSPHY Initialization */
 static int msm_ssphy_qmp_init(struct usb_phy *uphy)
 {
 	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
 					phy);
-	int ret, val;
+	int ret;
 	unsigned int init_timeout_usec = INIT_MAX_TIME_USEC;
 	const struct qmp_reg_val *reg = NULL;
 
@@ -297,11 +440,11 @@
 
 	msm_ssphy_qmp_enable_clks(phy, true);
 
-	writel_relaxed(0x01,
-		phy->base + phy->phy_reg[USB3_PHY_POWER_DOWN_CONTROL]);
+	/* power up PHY */
+	usb_qmp_powerup_phy(phy);
 
-	/* Make sure that above write completed to get PHY into POWER DOWN */
-	mb();
+	/* select appropriate port select and PHY mode if applicable */
+	usb_qmp_update_portselect_phymode(phy);
 
 	reg = (struct qmp_reg_val *)phy->qmp_phy_init_seq;
 
@@ -312,20 +455,15 @@
 		return ret;
 	}
 
-	/* perform lane selection */
-	val = -EINVAL;
-	if (phy->phy.flags & PHY_LANE_A)
-		val = SW_PORTSELECT_MX;
+	/* perform software reset of PHY common logic */
+	if (phy->phy.type == USB_PHY_TYPE_USB3_DP)
+		writel_relaxed(0x00,
+			phy->base + phy->phy_reg[USB3_DP_COM_SW_RESET]);
 
-	if (phy->phy.flags & PHY_LANE_B)
-		val = SW_PORTSELECT | SW_PORTSELECT_MX;
-
-	if (val > 0)
-		writel_relaxed(val,
-			phy->base + phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
-
-	writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
+	/* perform software reset of PCS/Serdes */
 	writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_SW_RESET]);
+	/* start PCS/Serdes to operation mode */
+	writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
 
 	/* Make sure above write completed to bring PHY out of reset */
 	mb();
@@ -350,6 +488,38 @@
 	return 0;
 }
 
+static int msm_ssphy_qmp_dp_combo_reset(struct usb_phy *uphy)
+{
+	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
+					phy);
+	int ret = 0;
+
+	/*
+	 * Avoid a global reset if the port selection is unchanged; this is
+	 * useful when only the PHY mode changes, e.g. a transition between
+	 * 4-lane and 2-lane operation.
+	 */
+	if (((phy->phy.flags & PHY_LANE_A) == phy->port_select) ||
+			((phy->phy.flags & PHY_LANE_B) == phy->port_select))
+		goto exit;
+
+	dev_dbg(uphy->dev, "Global reset of QMP DP combo phy\n");
+	/* Assert QMP USB DP combo PHY reset */
+	ret = reset_control_assert(phy->phy_reset);
+	if (ret) {
+		dev_err(uphy->dev, "phy_reset assert failed\n");
+		goto exit;
+	}
+
+	/* De-Assert QMP USB DP combo PHY reset */
+	ret = reset_control_deassert(phy->phy_reset);
+	if (ret)
+		dev_err(uphy->dev, "phy_reset deassert failed\n");
+
+exit:
+	return ret;
+}
+
 static int msm_ssphy_qmp_reset(struct usb_phy *uphy)
 {
 	struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
@@ -623,6 +793,11 @@
 	if (!phy)
 		return -ENOMEM;
 
+	phy->phy.type = USB_PHY_TYPE_USB3;
+	if (of_device_is_compatible(dev->of_node,
+			"qcom,usb-ssphy-qmp-dp-combo"))
+		phy->phy.type = USB_PHY_TYPE_USB3_DP;
+
 	ret = msm_ssphy_qmp_get_clks(phy, dev);
 	if (ret)
 		goto err;
@@ -634,11 +809,14 @@
 		goto err;
 	}
 
-	phy->phy_phy_reset = devm_reset_control_get(dev, "phy_phy_reset");
-	if (IS_ERR(phy->phy_phy_reset)) {
-		ret = PTR_ERR(phy->phy_phy_reset);
-		dev_dbg(dev, "failed to get phy_phy_reset\n");
-		goto err;
+	if (phy->phy.type == USB_PHY_TYPE_USB3) {
+		phy->phy_phy_reset = devm_reset_control_get(dev,
+						"phy_phy_reset");
+		if (IS_ERR(phy->phy_phy_reset)) {
+			ret = PTR_ERR(phy->phy_phy_reset);
+			dev_dbg(dev, "failed to get phy_phy_reset\n");
+			goto err;
+		}
 	}
 
 	of_get_property(dev->of_node, "qcom,qmp-phy-reg-offset", &size);
@@ -673,22 +851,25 @@
 		dev_err(dev, "failed getting qmp_phy_base\n");
 		return -ENODEV;
 	}
-	phy->base = devm_ioremap_resource(dev, res);
-	if (IS_ERR(phy->base)) {
+
+	/*
+	 * For the USB QMP DP combo PHY, the common register set is also
+	 * accessed by the DP driver, so map it without claiming the region
+	 * exclusively.
+	 */
+	phy->base = devm_ioremap_nocache(dev, res->start, resource_size(res));
+	if (IS_ERR_OR_NULL(phy->base)) {
 		ret = PTR_ERR(phy->base);
 		goto err;
 	}
 
-	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
-			"vls_clamp_reg");
-	if (!res) {
-		dev_err(dev, "failed getting vls_clamp_reg\n");
-		return -ENODEV;
-	}
-	phy->vls_clamp_reg = devm_ioremap_resource(dev, res);
-	if (IS_ERR(phy->vls_clamp_reg)) {
-		dev_err(dev, "couldn't find vls_clamp_reg address.\n");
-		return PTR_ERR(phy->vls_clamp_reg);
+	if (phy->phy.type == USB_PHY_TYPE_USB3) {
+		res = platform_get_resource_byname(pdev,
+				IORESOURCE_MEM, "vls_clamp_reg");
+		phy->vls_clamp_reg = devm_ioremap_resource(dev, res);
+		if (IS_ERR(phy->vls_clamp_reg)) {
+			dev_err(dev, "couldn't find vls_clamp_reg address.\n");
+			return PTR_ERR(phy->vls_clamp_reg);
+		}
 	}
 
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -787,8 +968,11 @@
 	phy->phy.set_suspend		= msm_ssphy_qmp_set_suspend;
 	phy->phy.notify_connect		= msm_ssphy_qmp_notify_connect;
 	phy->phy.notify_disconnect	= msm_ssphy_qmp_notify_disconnect;
-	phy->phy.reset			= msm_ssphy_qmp_reset;
-	phy->phy.type			= USB_PHY_TYPE_USB3;
+
+	if (phy->phy.type == USB_PHY_TYPE_USB3_DP)
+		phy->phy.reset		= msm_ssphy_qmp_dp_combo_reset;
+	else
+		phy->phy.reset		= msm_ssphy_qmp_reset;
 
 	ret = usb_add_phy_dev(&phy->phy);
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 1536aeb..4e894d3 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2532,17 +2532,14 @@
 	}
 
 	nfs4_stateid_copy(&stateid, &delegation->stateid);
-	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+	if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) ||
+		!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED,
+			&delegation->flags)) {
 		rcu_read_unlock();
 		nfs_finish_clear_delegation_stateid(state, &stateid);
 		return;
 	}
 
-	if (!test_and_clear_bit(NFS_DELEGATION_TEST_EXPIRED, &delegation->flags)) {
-		rcu_read_unlock();
-		return;
-	}
-
 	cred = get_rpccred(delegation->cred);
 	rcu_read_unlock();
 	status = nfs41_test_and_free_expired_stateid(server, &stateid, cred);
diff --git a/fs/nfsd/nfsproc.c b/fs/nfsd/nfsproc.c
index 010aff5..536009e 100644
--- a/fs/nfsd/nfsproc.c
+++ b/fs/nfsd/nfsproc.c
@@ -790,6 +790,7 @@
 		{ nfserr_serverfault, -ESERVERFAULT },
 		{ nfserr_serverfault, -ENFILE },
 		{ nfserr_io, -EUCLEAN },
+		{ nfserr_perm, -ENOKEY },
 	};
 	int	i;
 
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index eee4eb5..c0146e0 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -113,6 +113,10 @@
 	if (lower_file->f_op->unlocked_ioctl)
 		err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
 
+	/* some ioctls can change inode attributes (EXT2_IOC_SETFLAGS) */
+	if (!err)
+		sdcardfs_copy_and_fix_attrs(file_inode(file),
+				      file_inode(lower_file));
 out:
 	return err;
 }
@@ -176,12 +180,6 @@
 			goto out;
 		}
 		saved_vm_ops = vma->vm_ops; /* save: came from lower ->mmap */
-		err = do_munmap(current->mm, vma->vm_start,
-				vma->vm_end - vma->vm_start);
-		if (err) {
-			pr_err("sdcardfs: do_munmap failed %d\n", err);
-			goto out;
-		}
 	}
 
 	/*
@@ -318,6 +316,75 @@
 	return err;
 }
 
+/*
+ * Sdcardfs cannot use generic_file_llseek as ->llseek, because it would
+ * only set the offset of the upper file.  So we have to implement our
+ * own method to set both the upper and lower file offsets
+ * consistently.
+ */
+static loff_t sdcardfs_file_llseek(struct file *file, loff_t offset, int whence)
+{
+	int err;
+	struct file *lower_file;
+
+	err = generic_file_llseek(file, offset, whence);
+	if (err < 0)
+		goto out;
+
+	lower_file = sdcardfs_lower_file(file);
+	err = generic_file_llseek(lower_file, offset, whence);
+
+out:
+	return err;
+}
+
+/*
+ * Sdcardfs read_iter: redirect a modified iocb to the lower read_iter
+ */
+ssize_t sdcardfs_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	int err;
+	struct file *file = iocb->ki_filp, *lower_file;
+
+	lower_file = sdcardfs_lower_file(file);
+	if (!lower_file->f_op->read_iter) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	get_file(lower_file); /* prevent lower_file from being released */
+	iocb->ki_filp = lower_file;
+	err = lower_file->f_op->read_iter(iocb, iter);
+	/* TODO: wait for I/O to finish before updating atime, as ecryptfs does? */
+	iocb->ki_filp = file;
+	fput(lower_file);
+out:
+	return err;
+}
+
+/*
+ * Sdcardfs write_iter, redirect modified iocb to lower write_iter
+ */
+ssize_t sdcardfs_write_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	int err;
+	struct file *file = iocb->ki_filp, *lower_file;
+
+	lower_file = sdcardfs_lower_file(file);
+	if (!lower_file->f_op->write_iter) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	get_file(lower_file); /* prevent lower_file from being released */
+	iocb->ki_filp = lower_file;
+	err = lower_file->f_op->write_iter(iocb, iter);
+	iocb->ki_filp = file;
+	fput(lower_file);
+out:
+	return err;
+}
+
 const struct file_operations sdcardfs_main_fops = {
 	.llseek		= generic_file_llseek,
 	.read		= sdcardfs_read,
@@ -332,11 +399,13 @@
 	.release	= sdcardfs_file_release,
 	.fsync		= sdcardfs_fsync,
 	.fasync		= sdcardfs_fasync,
+	.read_iter	= sdcardfs_read_iter,
+	.write_iter	= sdcardfs_write_iter,
 };
 
 /* trimmed directory options */
 const struct file_operations sdcardfs_dir_fops = {
-	.llseek		= generic_file_llseek,
+	.llseek		= sdcardfs_file_llseek,
 	.read		= generic_read_dir,
 	.iterate	= sdcardfs_readdir,
 	.unlocked_ioctl	= sdcardfs_unlocked_ioctl,
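
The read_iter/write_iter wrappers added above share one pattern: pin the lower file with get_file(), temporarily repoint the kiocb at it, call the lower method, then restore the original filp before fput(). Here is a minimal userspace sketch of that redirect-and-restore pattern; struct lowfile, struct request, and all the helpers are made-up stand-ins for the kernel objects.

#include <stdio.h>
#include <string.h>

struct lowfile {
	int	refcount;
	char	data[64];
};

struct request {
	struct lowfile	*target;	/* plays the role of iocb->ki_filp */
	size_t		off;
};

static void get_file(struct lowfile *f) { f->refcount++; }
static void put_file(struct lowfile *f) { f->refcount--; }

/* the "lower" read method */
static int low_read(struct request *rq, char *buf, size_t len)
{
	memcpy(buf, rq->target->data + rq->off, len);
	return (int)len;
}

/* the "upper" read: redirect the request to the lower file, then restore */
static int upper_read(struct request *rq, struct lowfile *upper,
		      struct lowfile *lower, char *buf, size_t len)
{
	int err;

	get_file(lower);		/* prevent release while in flight */
	rq->target = lower;
	err = low_read(rq, buf, len);
	rq->target = upper;		/* restore before returning */
	put_file(lower);
	return err;
}

int main(void)
{
	struct lowfile up = { 1, "" }, low = { 1, "hello from lower layer" };
	struct request rq = { &up, 0 };
	char buf[16] = { 0 };

	upper_read(&rq, &up, &low, buf, 5);
	printf("%s\n", buf);		/* prints "hello" */
	return 0;
}
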
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index f028bfd..f9c0282 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -164,27 +164,25 @@
 }
 
 /*
- * Connect a sdcardfs inode dentry/inode with several lower ones.  This is
- * the classic stackable file system "vnode interposition" action.
- *
- * @dentry: sdcardfs's dentry which interposes on lower one
- * @sb: sdcardfs's super_block
- * @lower_path: the lower path (caller does path_get/put)
+ * Helper interpose routine, called directly by ->lookup to handle
+ * spliced dentries.
  */
-int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
-		     struct path *lower_path, userid_t id)
+static struct dentry *__sdcardfs_interpose(struct dentry *dentry,
+					 struct super_block *sb,
+					 struct path *lower_path,
+					 userid_t id)
 {
-	int err = 0;
 	struct inode *inode;
 	struct inode *lower_inode;
 	struct super_block *lower_sb;
+	struct dentry *ret_dentry;
 
 	lower_inode = d_inode(lower_path->dentry);
 	lower_sb = sdcardfs_lower_super(sb);
 
 	/* check that the lower file system didn't cross a mount point */
 	if (lower_inode->i_sb != lower_sb) {
-		err = -EXDEV;
+		ret_dentry = ERR_PTR(-EXDEV);
 		goto out;
 	}
 
@@ -196,14 +194,32 @@
 	/* inherit lower inode number for sdcardfs's inode */
 	inode = sdcardfs_iget(sb, lower_inode, id);
 	if (IS_ERR(inode)) {
-		err = PTR_ERR(inode);
+		ret_dentry = ERR_CAST(inode);
 		goto out;
 	}
 
-	d_add(dentry, inode);
+	ret_dentry = d_splice_alias(inode, dentry);
+	dentry = ret_dentry ?: dentry;
 	update_derived_permission_lock(dentry);
 out:
-	return err;
+	return ret_dentry;
+}
+
+/*
+ * Connect an sdcardfs inode dentry/inode with several lower ones.  This is
+ * the classic stackable file system "vnode interposition" action.
+ *
+ * @dentry: sdcardfs's dentry which interposes on lower one
+ * @sb: sdcardfs's super_block
+ * @lower_path: the lower path (caller does path_get/put)
+ */
+int sdcardfs_interpose(struct dentry *dentry, struct super_block *sb,
+		     struct path *lower_path, userid_t id)
+{
+	struct dentry *ret_dentry;
+
+	ret_dentry = __sdcardfs_interpose(dentry, sb, lower_path, id);
+	return PTR_ERR(ret_dentry);
 }
 
 struct sdcardfs_name_data {
@@ -244,6 +260,7 @@
 	const struct qstr *name;
 	struct path lower_path;
 	struct qstr dname;
+	struct dentry *ret_dentry = NULL;
 	struct sdcardfs_sb_info *sbi;
 
 	sbi = SDCARDFS_SB(dentry->d_sb);
@@ -330,9 +347,13 @@
 		}
 
 		sdcardfs_set_lower_path(dentry, &lower_path);
-		err = sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
-		if (err) /* path_put underlying path on error */
+		ret_dentry =
+			__sdcardfs_interpose(dentry, dentry->d_sb, &lower_path, id);
+		if (IS_ERR(ret_dentry)) {
+			err = PTR_ERR(ret_dentry);
+			/* path_put underlying path on error */
 			sdcardfs_put_reset_lower_path(dentry);
+		}
 		goto out;
 	}
 
@@ -372,7 +393,9 @@
 		err = 0;
 
 out:
-	return ERR_PTR(err);
+	if (err)
+		return ERR_PTR(err);
+	return ret_dentry;
 }
 
 /*
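
__sdcardfs_interpose() above replaces d_add() with d_splice_alias(), whose return value has three cases: NULL (keep the dentry that was passed in), a different dentry (use the alias instead), or an encoded error pointer. A hedged userspace sketch of handling that contract follows; the ERR_PTR/IS_ERR emulation and the splice_alias() stub are illustrative, not the kernel implementations.

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

#define MAX_ERRNO	4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}

struct dentry { const char *name; };

/* stand-in for d_splice_alias(): may alias, keep, or fail */
static struct dentry *splice_alias(struct dentry *existing,
				   struct dentry *candidate, int fail)
{
	(void)candidate;	/* a real implementation would hash it */
	if (fail)
		return ERR_PTR(-EXDEV);
	return existing;	/* may be NULL: "keep the candidate" */
}

int main(void)
{
	static struct dentry alias = { "alias" };
	struct dentry candidate = { "candidate" };
	struct dentry *ret;

	ret = splice_alias(&alias, &candidate, 0);
	if (IS_ERR(ret))
		return (int)-PTR_ERR(ret);
	/* the idiom from the hunk: prefer the spliced dentry if one came back */
	ret = ret ? ret : &candidate;
	printf("using %s\n", ret->name);	/* prints "using alias" */
	return 0;
}
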
diff --git a/fs/sdcardfs/main.c b/fs/sdcardfs/main.c
index 7344635..953d215 100644
--- a/fs/sdcardfs/main.c
+++ b/fs/sdcardfs/main.c
@@ -471,10 +471,15 @@
 	pr_info("Completed sdcardfs module unload\n");
 }
 
-MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University"
-	      " (http://www.fsl.cs.sunysb.edu/)");
-MODULE_DESCRIPTION("Wrapfs " SDCARDFS_VERSION
-		   " (http://wrapfs.filesystems.org/)");
+/* Original wrapfs authors */
+MODULE_AUTHOR("Erez Zadok, Filesystems and Storage Lab, Stony Brook University (http://www.fsl.cs.sunysb.edu/)");
+
+/* Original sdcardfs authors */
+MODULE_AUTHOR("Woojoong Lee, Daeho Jeong, Kitae Lee, Yeongjin Gil System Memory Lab., Samsung Electronics");
+
+/* Current maintainer */
+MODULE_AUTHOR("Daniel Rosenberg, Google");
+MODULE_DESCRIPTION("Sdcardfs " SDCARDFS_VERSION);
 MODULE_LICENSE("GPL");
 
 module_init(init_sdcardfs_fs);
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index d346d42..33db69b 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -39,6 +39,7 @@
 #include "xfs_rmap_btree.h"
 #include "xfs_btree.h"
 #include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"
 
 /*
  * Per-AG Block Reservations
@@ -200,22 +201,30 @@
 	struct xfs_mount		*mp = pag->pag_mount;
 	struct xfs_ag_resv		*resv;
 	int				error;
+	xfs_extlen_t			reserved;
 
-	resv = xfs_perag_resv(pag, type);
 	if (used > ask)
 		ask = used;
-	resv->ar_asked = ask;
-	resv->ar_reserved = resv->ar_orig_reserved = ask - used;
-	mp->m_ag_max_usable -= ask;
+	reserved = ask - used;
 
-	trace_xfs_ag_resv_init(pag, type, ask);
-
-	error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
-	if (error)
+	error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+	if (error) {
 		trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
 				error, _RET_IP_);
+		xfs_warn(mp,
+"Per-AG reservation for AG %u failed.  Filesystem may run out of space.",
+				pag->pag_agno);
+		return error;
+	}
 
-	return error;
+	mp->m_ag_max_usable -= ask;
+
+	resv = xfs_perag_resv(pag, type);
+	resv->ar_asked = ask;
+	resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+	trace_xfs_ag_resv_init(pag, type, ask);
+	return 0;
 }
 
 /* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@
 xfs_ag_resv_init(
 	struct xfs_perag		*pag)
 {
+	struct xfs_mount		*mp = pag->pag_mount;
+	xfs_agnumber_t			agno = pag->pag_agno;
 	xfs_extlen_t			ask;
 	xfs_extlen_t			used;
 	int				error = 0;
@@ -231,23 +242,45 @@
 	if (pag->pag_meta_resv.ar_asked == 0) {
 		ask = used = 0;
 
-		error = xfs_refcountbt_calc_reserves(pag->pag_mount,
-				pag->pag_agno, &ask, &used);
+		error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
+		if (error)
+			goto out;
+
+		error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;
 
 		error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
 				ask, used);
-		if (error)
-			goto out;
+		if (error) {
+			/*
+			 * Because we didn't have per-AG reservations when the
+			 * finobt feature was added, we might not be able to
+			 * reserve all needed blocks.  Warn and fall back to the
+			 * old and potentially buggy code in that case, but
+			 * ensure we do have the reservation for the refcountbt.
+			 */
+			ask = used = 0;
+
+			mp->m_inotbt_nores = true;
+
+			error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+					&used);
+			if (error)
+				goto out;
+
+			error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+					ask, used);
+			if (error)
+				goto out;
+		}
 	}
 
 	/* Create the AGFL metadata reservation */
 	if (pag->pag_agfl_resv.ar_asked == 0) {
 		ask = used = 0;
 
-		error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
-				&ask, &used);
+		error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
 		if (error)
 			goto out;
 
@@ -256,9 +289,16 @@
 			goto out;
 	}
 
+#ifdef DEBUG
+	/* need to read in the AGF for the ASSERT below to work */
+	error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+	if (error)
+		return error;
+
 	ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
 	       xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
 	       pag->pagf_freeblks + pag->pagf_flcount);
+#endif
 out:
 	return error;
 }
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index f52fd63..5a508b0 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -769,8 +769,8 @@
 		args.type = XFS_ALLOCTYPE_START_BNO;
 		args.fsbno = XFS_INO_TO_FSB(mp, ip->i_ino);
 	} else if (dfops->dop_low) {
-try_another_ag:
 		args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
 		args.fsbno = *firstblock;
 	} else {
 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
@@ -796,17 +796,19 @@
 	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
 	    args.fsbno == NULLFSBLOCK &&
 	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-		dfops->dop_low = true;
+		args.type = XFS_ALLOCTYPE_FIRST_AG;
 		goto try_another_ag;
 	}
+	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
+		xfs_iroot_realloc(ip, -1, whichfork);
+		xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
+		return -ENOSPC;
+	}
 	/*
 	 * Allocation can't fail, the space was reserved.
 	 */
-	ASSERT(args.fsbno != NULLFSBLOCK);
 	ASSERT(*firstblock == NULLFSBLOCK ||
-	       args.agno == XFS_FSB_TO_AGNO(mp, *firstblock) ||
-	       (dfops->dop_low &&
-		args.agno > XFS_FSB_TO_AGNO(mp, *firstblock)));
+	       args.agno >= XFS_FSB_TO_AGNO(mp, *firstblock));
 	*firstblock = cur->bc_private.b.firstblock = args.fsbno;
 	cur->bc_private.b.allocated++;
 	ip->i_d.di_nblocks++;
@@ -1278,7 +1280,6 @@
 	/* REFERENCED */
 	xfs_extnum_t		room;	/* number of entries there's room for */
 
-	bno = NULLFSBLOCK;
 	mp = ip->i_mount;
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	exntf = (whichfork != XFS_DATA_FORK) ? XFS_EXTFMT_NOSTATE :
@@ -1291,9 +1292,7 @@
 	ASSERT(level > 0);
 	pp = XFS_BMAP_BROOT_PTR_ADDR(mp, block, 1, ifp->if_broot_bytes);
 	bno = be64_to_cpu(*pp);
-	ASSERT(bno != NULLFSBLOCK);
-	ASSERT(XFS_FSB_TO_AGNO(mp, bno) < mp->m_sb.sb_agcount);
-	ASSERT(XFS_FSB_TO_AGBNO(mp, bno) < mp->m_sb.sb_agblocks);
+
 	/*
 	 * Go down the tree until leaf level is reached, following the first
 	 * pointer (leftmost) at each level.
@@ -1955,6 +1954,7 @@
 		 */
 		trace_xfs_bmap_pre_update(bma->ip, bma->idx, state, _THIS_IP_);
 		xfs_bmbt_set_startblock(ep, new->br_startblock);
+		xfs_bmbt_set_state(ep, new->br_state);
 		trace_xfs_bmap_post_update(bma->ip, bma->idx, state, _THIS_IP_);
 
 		(*nextents)++;
@@ -2293,6 +2293,7 @@
 xfs_bmap_add_extent_unwritten_real(
 	struct xfs_trans	*tp,
 	xfs_inode_t		*ip,	/* incore inode pointer */
+	int			whichfork,
 	xfs_extnum_t		*idx,	/* extent number to update/insert */
 	xfs_btree_cur_t		**curp,	/* if *curp is null, not a btree */
 	xfs_bmbt_irec_t		*new,	/* new data to add to file extents */
@@ -2312,12 +2313,14 @@
 					/* left is 0, right is 1, prev is 2 */
 	int			rval=0;	/* return value (logging flags) */
 	int			state = 0;/* state bits, accessed thru macros */
-	struct xfs_mount	*mp = tp->t_mountp;
+	struct xfs_mount	*mp = ip->i_mount;
 
 	*logflagsp = 0;
 
 	cur = *curp;
-	ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
+	ifp = XFS_IFORK_PTR(ip, whichfork);
+	if (whichfork == XFS_COW_FORK)
+		state |= BMAP_COWFORK;
 
 	ASSERT(*idx >= 0);
 	ASSERT(*idx <= xfs_iext_count(ifp));
@@ -2376,7 +2379,7 @@
 	 * Don't set contiguous if the combined extent would be too large.
 	 * Also check for all-three-contiguous being too large.
 	 */
-	if (*idx < xfs_iext_count(&ip->i_df) - 1) {
+	if (*idx < xfs_iext_count(ifp) - 1) {
 		state |= BMAP_RIGHT_VALID;
 		xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT);
 		if (isnullstartblock(RIGHT.br_startblock))
@@ -2416,7 +2419,8 @@
 		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
 		xfs_iext_remove(ip, *idx + 1, 2, state);
-		ip->i_d.di_nextents -= 2;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) - 2);
 		if (cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2459,7 +2463,8 @@
 		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
 		xfs_iext_remove(ip, *idx + 1, 1, state);
-		ip->i_d.di_nextents--;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2494,7 +2499,8 @@
 		xfs_bmbt_set_state(ep, newext);
 		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 		xfs_iext_remove(ip, *idx + 1, 1, state);
-		ip->i_d.di_nextents--;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2606,7 +2612,8 @@
 		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
 
 		xfs_iext_insert(ip, *idx, 1, new, state);
-		ip->i_d.di_nextents++;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2684,7 +2691,8 @@
 		++*idx;
 		xfs_iext_insert(ip, *idx, 1, new, state);
 
-		ip->i_d.di_nextents++;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
 		if (cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2732,7 +2740,8 @@
 		++*idx;
 		xfs_iext_insert(ip, *idx, 2, &r[0], state);
 
-		ip->i_d.di_nextents += 2;
+		XFS_IFORK_NEXT_SET(ip, whichfork,
+				XFS_IFORK_NEXTENTS(ip, whichfork) + 2);
 		if (cur == NULL)
 			rval = XFS_ILOG_CORE | XFS_ILOG_DEXT;
 		else {
@@ -2786,17 +2795,17 @@
 	}
 
 	/* update reverse mappings */
-	error = xfs_rmap_convert_extent(mp, dfops, ip, XFS_DATA_FORK, new);
+	error = xfs_rmap_convert_extent(mp, dfops, ip, whichfork, new);
 	if (error)
 		goto done;
 
 	/* convert to a btree if necessary */
-	if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
+	if (xfs_bmap_needs_btree(ip, whichfork)) {
 		int	tmp_logflags;	/* partial log flag return val */
 
 		ASSERT(cur == NULL);
 		error = xfs_bmap_extents_to_btree(tp, ip, first, dfops, &cur,
-				0, &tmp_logflags, XFS_DATA_FORK);
+				0, &tmp_logflags, whichfork);
 		*logflagsp |= tmp_logflags;
 		if (error)
 			goto done;
@@ -2808,7 +2817,7 @@
 		*curp = cur;
 	}
 
-	xfs_bmap_check_leaf_extents(*curp, ip, XFS_DATA_FORK);
+	xfs_bmap_check_leaf_extents(*curp, ip, whichfork);
 done:
 	*logflagsp |= rval;
 	return error;
@@ -2900,7 +2909,8 @@
 		oldlen = startblockval(left.br_startblock) +
 			startblockval(new->br_startblock) +
 			startblockval(right.br_startblock);
-		newlen = xfs_bmap_worst_indlen(ip, temp);
+		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+					 oldlen);
 		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
 			nullstartblock((int)newlen));
 		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
@@ -2921,7 +2931,8 @@
 		xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp);
 		oldlen = startblockval(left.br_startblock) +
 			startblockval(new->br_startblock);
-		newlen = xfs_bmap_worst_indlen(ip, temp);
+		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+					 oldlen);
 		xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx),
 			nullstartblock((int)newlen));
 		trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_);
@@ -2937,7 +2948,8 @@
 		temp = new->br_blockcount + right.br_blockcount;
 		oldlen = startblockval(new->br_startblock) +
 			startblockval(right.br_startblock);
-		newlen = xfs_bmap_worst_indlen(ip, temp);
+		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
+					 oldlen);
 		xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx),
 			new->br_startoff,
 			nullstartblock((int)newlen), temp, right.br_state);
@@ -3913,17 +3925,13 @@
 		 * the first block that was allocated.
 		 */
 		ASSERT(*ap->firstblock == NULLFSBLOCK ||
-		       XFS_FSB_TO_AGNO(mp, *ap->firstblock) ==
-		       XFS_FSB_TO_AGNO(mp, args.fsbno) ||
-		       (ap->dfops->dop_low &&
-			XFS_FSB_TO_AGNO(mp, *ap->firstblock) <
-			XFS_FSB_TO_AGNO(mp, args.fsbno)));
+		       XFS_FSB_TO_AGNO(mp, *ap->firstblock) <=
+		       XFS_FSB_TO_AGNO(mp, args.fsbno));
 
 		ap->blkno = args.fsbno;
 		if (*ap->firstblock == NULLFSBLOCK)
 			*ap->firstblock = args.fsbno;
-		ASSERT(nullfb || fb_agno == args.agno ||
-		       (ap->dfops->dop_low && fb_agno < args.agno));
+		ASSERT(nullfb || fb_agno <= args.agno);
 		ap->length = args.len;
 		if (!(ap->flags & XFS_BMAPI_COWFORK))
 			ap->ip->i_d.di_nblocks += args.len;
@@ -4249,6 +4257,19 @@
 	return 0;
 }
 
+/*
+ * Add a delayed allocation extent to an inode. Blocks are reserved from the
+ * global pool and the extent is inserted into the inode's in-core extent tree.
+ *
+ * On entry, got refers to the first extent beyond the offset of the extent to
+ * allocate or eof is specified if no such extent exists. On return, got refers
+ * to the extent record that was inserted to the inode fork.
+ *
+ * Note that the allocated extent may have been merged with contiguous extents
+ * during insertion into the inode fork. Thus, got does not reflect the current
+ * state of the inode fork on return. If necessary, the caller can use lastx to
+ * look up the updated record in the inode fork.
+ */
 int
 xfs_bmapi_reserve_delalloc(
 	struct xfs_inode	*ip,
@@ -4335,13 +4356,8 @@
 	got->br_startblock = nullstartblock(indlen);
 	got->br_blockcount = alen;
 	got->br_state = XFS_EXT_NORM;
-	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
-	/*
-	 * Update our extent pointer, given that xfs_bmap_add_extent_hole_delay
-	 * might have merged it into one of the neighbouring ones.
-	 */
-	xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *lastx), got);
+	xfs_bmap_add_extent_hole_delay(ip, whichfork, lastx, got);
 
 	/*
 	 * Tag the inode if blocks were preallocated. Note that COW fork
@@ -4353,10 +4369,6 @@
 	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
 		xfs_inode_set_cowblocks_tag(ip);
 
-	ASSERT(got->br_startoff <= aoff);
-	ASSERT(got->br_startoff + got->br_blockcount >= aoff + alen);
-	ASSERT(isnullstartblock(got->br_startblock));
-	ASSERT(got->br_state == XFS_EXT_NORM);
 	return 0;
 
 out_unreserve_blocks:
@@ -4461,10 +4473,16 @@
 	bma->got.br_state = XFS_EXT_NORM;
 
 	/*
-	 * A wasdelay extent has been initialized, so shouldn't be flagged
-	 * as unwritten.
+	 * In the data fork, a wasdelay extent has been initialized, so
+	 * it shouldn't be flagged as unwritten.
+	 *
+	 * For the cow fork, however, we convert delalloc reservations
+	 * (extents allocated for speculative preallocation) to
+	 * allocated unwritten extents, and only convert the unwritten
+	 * extents to real extents when we're about to write the data.
 	 */
-	if (!bma->wasdel && (bma->flags & XFS_BMAPI_PREALLOC) &&
+	if ((!bma->wasdel || (bma->flags & XFS_BMAPI_COWFORK)) &&
+	    (bma->flags & XFS_BMAPI_PREALLOC) &&
 	    xfs_sb_version_hasextflgbit(&mp->m_sb))
 		bma->got.br_state = XFS_EXT_UNWRITTEN;
 
@@ -4515,8 +4533,6 @@
 			(XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT))
 		return 0;
 
-	ASSERT(whichfork != XFS_COW_FORK);
-
 	/*
 	 * Modify (by adding) the state flag, if writing.
 	 */
@@ -4541,8 +4557,8 @@
 			return error;
 	}
 
-	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, &bma->idx,
-			&bma->cur, mval, bma->firstblock, bma->dfops,
+	error = xfs_bmap_add_extent_unwritten_real(bma->tp, bma->ip, whichfork,
+			&bma->idx, &bma->cur, mval, bma->firstblock, bma->dfops,
 			&tmp_logflags);
 	/*
 	 * Log the inode core unconditionally in the unwritten extent conversion
@@ -4551,8 +4567,12 @@
 	 * in the transaction for the sake of fsync(), even if nothing has
 	 * changed, because fsync() will not force the log for this transaction
 	 * unless it sees the inode pinned.
+	 *
+	 * Note: If we're only converting cow fork extents, there aren't
+	 * any on-disk updates to make, so we don't need to log anything.
 	 */
-	bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
+	if (whichfork != XFS_COW_FORK)
+		bma->logflags |= tmp_logflags | XFS_ILOG_CORE;
 	if (error)
 		return error;
 
@@ -4626,15 +4646,15 @@
 	ASSERT(*nmap >= 1);
 	ASSERT(*nmap <= XFS_BMAP_MAX_NMAP);
 	ASSERT(!(flags & XFS_BMAPI_IGSTATE));
-	ASSERT(tp != NULL);
+	ASSERT(tp != NULL ||
+	       (flags & (XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK)) ==
+			(XFS_BMAPI_CONVERT | XFS_BMAPI_COWFORK));
 	ASSERT(len > 0);
 	ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_LOCAL);
 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	ASSERT(!(flags & XFS_BMAPI_REMAP) || whichfork == XFS_DATA_FORK);
 	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || !(flags & XFS_BMAPI_REMAP));
 	ASSERT(!(flags & XFS_BMAPI_CONVERT) || !(flags & XFS_BMAPI_REMAP));
-	ASSERT(!(flags & XFS_BMAPI_PREALLOC) || whichfork != XFS_COW_FORK);
-	ASSERT(!(flags & XFS_BMAPI_CONVERT) || whichfork != XFS_COW_FORK);
 
 	/* zeroing is for currently only for data extents, not metadata */
 	ASSERT((flags & (XFS_BMAPI_METADATA | XFS_BMAPI_ZERO)) !=
@@ -4840,13 +4860,9 @@
 	if (bma.cur) {
 		if (!error) {
 			ASSERT(*firstblock == NULLFSBLOCK ||
-			       XFS_FSB_TO_AGNO(mp, *firstblock) ==
+			       XFS_FSB_TO_AGNO(mp, *firstblock) <=
 			       XFS_FSB_TO_AGNO(mp,
-				       bma.cur->bc_private.b.firstblock) ||
-			       (dfops->dop_low &&
-				XFS_FSB_TO_AGNO(mp, *firstblock) <
-				XFS_FSB_TO_AGNO(mp,
-					bma.cur->bc_private.b.firstblock)));
+				       bma.cur->bc_private.b.firstblock));
 			*firstblock = bma.cur->bc_private.b.firstblock;
 		}
 		xfs_btree_del_cursor(bma.cur,
@@ -4881,34 +4897,59 @@
 	xfs_filblks_t			len2 = *indlen2;
 	xfs_filblks_t			nres = len1 + len2; /* new total res. */
 	xfs_filblks_t			stolen = 0;
+	xfs_filblks_t			resfactor;
 
 	/*
 	 * Steal as many blocks as we can to try and satisfy the worst case
 	 * indlen for both new extents.
 	 */
-	while (nres > ores && avail) {
-		nres--;
-		avail--;
-		stolen++;
-	}
+	if (ores < nres && avail)
+		stolen = XFS_FILBLKS_MIN(nres - ores, avail);
+	ores += stolen;
+
+	/* nothing else to do if we've satisfied the new reservation */
+	if (ores >= nres)
+		return stolen;
 
 	/*
-	 * The only blocks available are those reserved for the original
-	 * extent and what we can steal from the extent being removed.
-	 * If this still isn't enough to satisfy the combined
-	 * requirements for the two new extents, skim blocks off of each
-	 * of the new reservations until they match what is available.
+	 * We can't meet the total required reservation for the two extents.
+	 * Calculate the percent of the overall shortage between both extents
+	 * and apply this percentage to each of the requested indlen values.
+	 * This distributes the shortage fairly and reduces the chances that one
+	 * of the two extents is left with nothing when extents are repeatedly
+	 * split.
 	 */
-	while (nres > ores) {
-		if (len1) {
-			len1--;
-			nres--;
+	resfactor = (ores * 100);
+	do_div(resfactor, nres);
+	len1 *= resfactor;
+	do_div(len1, 100);
+	len2 *= resfactor;
+	do_div(len2, 100);
+	ASSERT(len1 + len2 <= ores);
+	ASSERT(len1 < *indlen1 && len2 < *indlen2);
+
+	/*
+	 * Hand out the remainder to each extent. If one of the two reservations
+	 * is zero, we want to make sure that one gets a block first. The loop
+	 * below starts with len1, so hand len2 a block right off the bat if it
+	 * is zero.
+	 */
+	ores -= (len1 + len2);
+	ASSERT((*indlen1 - len1) + (*indlen2 - len2) >= ores);
+	if (ores && !len2 && *indlen2) {
+		len2++;
+		ores--;
+	}
+	while (ores) {
+		if (len1 < *indlen1) {
+			len1++;
+			ores--;
 		}
-		if (nres == ores)
+		if (!ores)
 			break;
-		if (len2) {
-			len2--;
-			nres--;
+		if (len2 < *indlen2) {
+			len2++;
+			ores--;
 		}
 	}
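
The rewritten xfs_bmap_split_indlen() above replaces the one-block-at-a-time skimming loop with a proportional split: both requested reservations are scaled by the integer percentage ores/nres, and the rounding remainder is then handed out a block at a time. Below is a small userspace model of that arithmetic, with do_div() replaced by plain division; for example, asking for 10 + 6 blocks with only 10 available yields 7 + 3.

#include <stdio.h>

/* scale both reservations by ores/nres, then hand out the remainder */
static void split_indlen(unsigned long long ores,
			 unsigned long long *indlen1,
			 unsigned long long *indlen2)
{
	unsigned long long nres = *indlen1 + *indlen2;
	unsigned long long len1, len2, resfactor;

	if (ores >= nres)
		return;			/* enough for both: nothing to trim */

	resfactor = (ores * 100) / nres;
	len1 = (*indlen1 * resfactor) / 100;
	len2 = (*indlen2 * resfactor) / 100;

	/* distribute the rounding remainder; len2 first if it got nothing */
	ores -= len1 + len2;
	if (ores && !len2 && *indlen2) {
		len2++;
		ores--;
	}
	while (ores) {
		if (len1 < *indlen1) { len1++; ores--; }
		if (!ores)
			break;
		if (len2 < *indlen2) { len2++; ores--; }
	}

	*indlen1 = len1;
	*indlen2 = len2;
}

int main(void)
{
	unsigned long long l1 = 10, l2 = 6;	/* ask for 16, only 10 left */

	split_indlen(10, &l1, &l2);
	/* resfactor = 62%: scaled to 6 + 3, the remaining block goes to len1 */
	printf("len1=%llu len2=%llu\n", l1, l2);	/* prints len1=7 len2=3 */
	return 0;
}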
 
@@ -5656,8 +5697,8 @@
 			}
 			del.br_state = XFS_EXT_UNWRITTEN;
 			error = xfs_bmap_add_extent_unwritten_real(tp, ip,
-					&lastx, &cur, &del, firstblock, dfops,
-					&logflags);
+					whichfork, &lastx, &cur, &del,
+					firstblock, dfops, &logflags);
 			if (error)
 				goto error0;
 			goto nodelete;
@@ -5714,8 +5755,9 @@
 				prev.br_state = XFS_EXT_UNWRITTEN;
 				lastx--;
 				error = xfs_bmap_add_extent_unwritten_real(tp,
-						ip, &lastx, &cur, &prev,
-						firstblock, dfops, &logflags);
+						ip, whichfork, &lastx, &cur,
+						&prev, firstblock, dfops,
+						&logflags);
 				if (error)
 					goto error0;
 				goto nodelete;
@@ -5723,8 +5765,9 @@
 				ASSERT(del.br_state == XFS_EXT_NORM);
 				del.br_state = XFS_EXT_UNWRITTEN;
 				error = xfs_bmap_add_extent_unwritten_real(tp,
-						ip, &lastx, &cur, &del,
-						firstblock, dfops, &logflags);
+						ip, whichfork, &lastx, &cur,
+						&del, firstblock, dfops,
+						&logflags);
 				if (error)
 					goto error0;
 				goto nodelete;
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index f76c169..5c39186 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -453,8 +453,8 @@
 
 	if (args.fsbno == NULLFSBLOCK) {
 		args.fsbno = be64_to_cpu(start->l);
-try_another_ag:
 		args.type = XFS_ALLOCTYPE_START_BNO;
+try_another_ag:
 		/*
 		 * Make sure there is sufficient room left in the AG to
 		 * complete a full tree split for an extent insert.  If
@@ -494,8 +494,8 @@
 	if (xfs_sb_version_hasreflink(&cur->bc_mp->m_sb) &&
 	    args.fsbno == NULLFSBLOCK &&
 	    args.type == XFS_ALLOCTYPE_NEAR_BNO) {
-		cur->bc_private.b.dfops->dop_low = true;
 		args.fsbno = cur->bc_private.b.firstblock;
+		args.type = XFS_ALLOCTYPE_FIRST_AG;
 		goto try_another_ag;
 	}
 
@@ -512,7 +512,7 @@
 			goto error0;
 		cur->bc_private.b.dfops->dop_low = true;
 	}
-	if (args.fsbno == NULLFSBLOCK) {
+	if (WARN_ON_ONCE(args.fsbno == NULLFSBLOCK)) {
 		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
 		*stat = 0;
 		return 0;
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 21e6a6a..2849d3f 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -810,7 +810,8 @@
 	xfs_daddr_t		d;		/* real disk block address */
 	int			error;
 
-	ASSERT(fsbno != NULLFSBLOCK);
+	if (!XFS_FSB_SANITY_CHECK(mp, fsbno))
+		return -EFSCORRUPTED;
 	d = XFS_FSB_TO_DADDR(mp, fsbno);
 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, d,
 				   mp->m_bsize, lock, &bp, ops);
diff --git a/fs/xfs/libxfs/xfs_btree.h b/fs/xfs/libxfs/xfs_btree.h
index c2b01d1..3b0fc1a 100644
--- a/fs/xfs/libxfs/xfs_btree.h
+++ b/fs/xfs/libxfs/xfs_btree.h
@@ -491,7 +491,7 @@
 #define	XFS_FILBLKS_MAX(a,b)	max_t(xfs_filblks_t, (a), (b))
 
 #define	XFS_FSB_SANITY_CHECK(mp,fsb)	\
-	(XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
+	(fsb && XFS_FSB_TO_AGNO(mp, fsb) < mp->m_sb.sb_agcount && \
 		XFS_FSB_TO_AGBNO(mp, fsb) < mp->m_sb.sb_agblocks)
 
 /*
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index f2dc1a9..1bdf288 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2633,7 +2633,7 @@
 /*
  * Readahead the dir/attr block.
  */
-xfs_daddr_t
+int
 xfs_da_reada_buf(
 	struct xfs_inode	*dp,
 	xfs_dablk_t		bno,
@@ -2664,7 +2664,5 @@
 	if (mapp != &map)
 		kmem_free(mapp);
 
-	if (error)
-		return -1;
-	return mappedbno;
+	return error;
 }
diff --git a/fs/xfs/libxfs/xfs_da_btree.h b/fs/xfs/libxfs/xfs_da_btree.h
index 98c75cb..4e29cb6 100644
--- a/fs/xfs/libxfs/xfs_da_btree.h
+++ b/fs/xfs/libxfs/xfs_da_btree.h
@@ -201,7 +201,7 @@
 			       xfs_dablk_t bno, xfs_daddr_t mappedbno,
 			       struct xfs_buf **bpp, int whichfork,
 			       const struct xfs_buf_ops *ops);
-xfs_daddr_t	xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
+int	xfs_da_reada_buf(struct xfs_inode *dp, xfs_dablk_t bno,
 				xfs_daddr_t mapped_bno, int whichfork,
 				const struct xfs_buf_ops *ops);
 int	xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 75a5574..bbd1238 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -155,6 +155,42 @@
 	.verify_write = xfs_dir3_free_write_verify,
 };
 
+/* Everything ok in the free block header? */
+static bool
+xfs_dir3_free_header_check(
+	struct xfs_inode	*dp,
+	xfs_dablk_t		fbno,
+	struct xfs_buf		*bp)
+{
+	struct xfs_mount	*mp = dp->i_mount;
+	unsigned int		firstdb;
+	int			maxbests;
+
+	maxbests = dp->d_ops->free_max_bests(mp->m_dir_geo);
+	firstdb = (xfs_dir2_da_to_db(mp->m_dir_geo, fbno) -
+		   xfs_dir2_byte_to_db(mp->m_dir_geo, XFS_DIR2_FREE_OFFSET)) *
+			maxbests;
+	if (xfs_sb_version_hascrc(&mp->m_sb)) {
+		struct xfs_dir3_free_hdr *hdr3 = bp->b_addr;
+
+		if (be32_to_cpu(hdr3->firstdb) != firstdb)
+			return false;
+		if (be32_to_cpu(hdr3->nvalid) > maxbests)
+			return false;
+		if (be32_to_cpu(hdr3->nvalid) < be32_to_cpu(hdr3->nused))
+			return false;
+	} else {
+		struct xfs_dir2_free_hdr *hdr = bp->b_addr;
+
+		if (be32_to_cpu(hdr->firstdb) != firstdb)
+			return false;
+		if (be32_to_cpu(hdr->nvalid) > maxbests)
+			return false;
+		if (be32_to_cpu(hdr->nvalid) < be32_to_cpu(hdr->nused))
+			return false;
+	}
+	return true;
+}
 
 static int
 __xfs_dir3_free_read(
@@ -168,11 +204,22 @@
 
 	err = xfs_da_read_buf(tp, dp, fbno, mappedbno, bpp,
 				XFS_DATA_FORK, &xfs_dir3_free_buf_ops);
+	if (err || !*bpp)
+		return err;
+
+	/* Check things that we can't do in the verifier. */
+	if (!xfs_dir3_free_header_check(dp, fbno, *bpp)) {
+		xfs_buf_ioerror(*bpp, -EFSCORRUPTED);
+		xfs_verifier_error(*bpp);
+		xfs_trans_brelse(tp, *bpp);
+		return -EFSCORRUPTED;
+	}
 
 	/* try read returns without an error or *bpp if it lands in a hole */
-	if (!err && tp && *bpp)
+	if (tp)
 		xfs_trans_buf_set_type(tp, *bpp, XFS_BLFT_DIR_FREE_BUF);
-	return err;
+
+	return 0;
 }
 
 int
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index d45c037..a2818f6 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -51,8 +51,7 @@
 	struct xfs_mount	*mp)
 {
 	if (xfs_sb_version_hasalign(&mp->m_sb) &&
-	    mp->m_sb.sb_inoalignmt >=
-			XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+	    mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
 		return mp->m_sb.sb_inoalignmt;
 	return 1;
 }
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 6c6b959..b9c351f 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -82,11 +82,12 @@
 }
 
 STATIC int
-xfs_inobt_alloc_block(
+__xfs_inobt_alloc_block(
 	struct xfs_btree_cur	*cur,
 	union xfs_btree_ptr	*start,
 	union xfs_btree_ptr	*new,
-	int			*stat)
+	int			*stat,
+	enum xfs_ag_resv_type	resv)
 {
 	xfs_alloc_arg_t		args;		/* block allocation args */
 	int			error;		/* error return value */
@@ -103,6 +104,7 @@
 	args.maxlen = 1;
 	args.prod = 1;
 	args.type = XFS_ALLOCTYPE_NEAR_BNO;
+	args.resv = resv;
 
 	error = xfs_alloc_vextent(&args);
 	if (error) {
@@ -123,6 +125,27 @@
 }
 
 STATIC int
+xfs_inobt_alloc_block(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*start,
+	union xfs_btree_ptr	*new,
+	int			*stat)
+{
+	return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_alloc_block(
+	struct xfs_btree_cur	*cur,
+	union xfs_btree_ptr	*start,
+	union xfs_btree_ptr	*new,
+	int			*stat)
+{
+	return __xfs_inobt_alloc_block(cur, start, new, stat,
+			XFS_AG_RESV_METADATA);
+}
+
+STATIC int
 xfs_inobt_free_block(
 	struct xfs_btree_cur	*cur,
 	struct xfs_buf		*bp)
@@ -328,7 +351,7 @@
 
 	.dup_cursor		= xfs_inobt_dup_cursor,
 	.set_root		= xfs_finobt_set_root,
-	.alloc_block		= xfs_inobt_alloc_block,
+	.alloc_block		= xfs_finobt_alloc_block,
 	.free_block		= xfs_inobt_free_block,
 	.get_minrecs		= xfs_inobt_get_minrecs,
 	.get_maxrecs		= xfs_inobt_get_maxrecs,
@@ -478,3 +501,64 @@
 	return 0;
 }
 #endif	/* DEBUG */
+
+static xfs_extlen_t
+xfs_inobt_max_size(
+	struct xfs_mount	*mp)
+{
+	/* Bail out if we're uninitialized, which can happen in mkfs. */
+	if (mp->m_inobt_mxr[0] == 0)
+		return 0;
+
+	return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+		(uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+				XFS_INODES_PER_CHUNK);
+}
+
+static int
+xfs_inobt_count_blocks(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_btnum_t		btnum,
+	xfs_extlen_t		*tree_blocks)
+{
+	struct xfs_buf		*agbp;
+	struct xfs_btree_cur	*cur;
+	int			error;
+
+	error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+	if (error)
+		return error;
+
+	cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+	error = xfs_btree_count_blocks(cur, tree_blocks);
+	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+	xfs_buf_relse(agbp);
+
+	return error;
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_finobt_calc_reserves(
+	struct xfs_mount	*mp,
+	xfs_agnumber_t		agno,
+	xfs_extlen_t		*ask,
+	xfs_extlen_t		*used)
+{
+	xfs_extlen_t		tree_len = 0;
+	int			error;
+
+	if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+		return 0;
+
+	error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+	if (error)
+		return error;
+
+	*ask += xfs_inobt_max_size(mp);
+	*used += tree_len;
+	return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bd88453..aa81e2e 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -72,4 +72,7 @@
 #define xfs_inobt_rec_check_count(mp, rec)	0
 #endif	/* DEBUG */
 
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
+		xfs_extlen_t *ask, xfs_extlen_t *used);
+
 #endif	/* __XFS_IALLOC_BTREE_H__ */
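
xfs_finobt_calc_reserves() above follows the same accumulate-into-(ask, used) convention as the refcountbt calculator, so xfs_ag_resv_init() can sum several btrees and take one reservation for ask - used blocks. A toy userspace sketch of that accumulation pattern with stubbed calculators; the numbers are invented.

#include <stdio.h>

static int refcountbt_calc_reserves(unsigned *ask, unsigned *used)
{
	*ask += 100;	/* worst-case tree size (stub) */
	*used += 20;	/* blocks currently in the tree (stub) */
	return 0;
}

static int finobt_calc_reserves(unsigned *ask, unsigned *used)
{
	*ask += 40;
	*used += 5;
	return 0;
}

int main(void)
{
	unsigned ask = 0, used = 0;
	int error;

	error = refcountbt_calc_reserves(&ask, &used);
	if (!error)
		error = finobt_calc_reserves(&ask, &used);
	if (error)
		return 1;

	/* reserve ask - used blocks from the free pool, as the hunk does */
	printf("reserve %u blocks (ask %u, used %u)\n", ask - used, ask, used);
	return 0;
}
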
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 222e103..25c1e07 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -26,6 +26,7 @@
 #include "xfs_inode.h"
 #include "xfs_trans.h"
 #include "xfs_inode_item.h"
+#include "xfs_btree.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_bmap.h"
 #include "xfs_error.h"
@@ -429,11 +430,13 @@
 	/* REFERENCED */
 	int			nrecs;
 	int			size;
+	int			level;
 
 	ifp = XFS_IFORK_PTR(ip, whichfork);
 	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
 	size = XFS_BMAP_BROOT_SPACE(mp, dfp);
 	nrecs = be16_to_cpu(dfp->bb_numrecs);
+	level = be16_to_cpu(dfp->bb_level);
 
 	/*
 	 * blow out if -- fork has less extents than can fit in
@@ -446,7 +449,8 @@
 					XFS_IFORK_MAXEXT(ip, whichfork) ||
 		     XFS_BMDR_SPACE_CALC(nrecs) >
 					XFS_DFORK_SIZE(dip, mp, whichfork) ||
-		     XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
+		     XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks) ||
+		     level == 0 || level > XFS_BTREE_MAXLEVELS) {
 		xfs_warn(mp, "corrupt inode %Lu (btree).",
 					(unsigned long long) ip->i_ino);
 		XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
@@ -497,15 +501,14 @@
 	 * We know that the size is valid (it's checked in iformat_btree)
 	 */
 	ifp->if_bytes = ifp->if_real_bytes = 0;
-	ifp->if_flags |= XFS_IFEXTENTS;
 	xfs_iext_add(ifp, 0, nextents);
 	error = xfs_bmap_read_extents(tp, ip, whichfork);
 	if (error) {
 		xfs_iext_destroy(ifp);
-		ifp->if_flags &= ~XFS_IFEXTENTS;
 		return error;
 	}
 	xfs_validate_extents(ifp, nextents, XFS_EXTFMT_INODE(ip));
+	ifp->if_flags |= XFS_IFEXTENTS;
 	return 0;
 }
 /*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 06763f5..0457abe 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -279,54 +279,49 @@
 	struct xfs_ioend	*ioend =
 		container_of(work, struct xfs_ioend, io_work);
 	struct xfs_inode	*ip = XFS_I(ioend->io_inode);
+	xfs_off_t		offset = ioend->io_offset;
+	size_t			size = ioend->io_size;
 	int			error = ioend->io_bio->bi_error;
 
 	/*
-	 * Set an error if the mount has shut down and proceed with end I/O
-	 * processing so it can perform whatever cleanups are necessary.
+	 * Just clean up the in-memory structures if the fs has been shut down.
 	 */
-	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
 		error = -EIO;
-
-	/*
-	 * For a CoW extent, we need to move the mapping from the CoW fork
-	 * to the data fork.  If instead an error happened, just dump the
-	 * new blocks.
-	 */
-	if (ioend->io_type == XFS_IO_COW) {
-		if (error)
-			goto done;
-		if (ioend->io_bio->bi_error) {
-			error = xfs_reflink_cancel_cow_range(ip,
-					ioend->io_offset, ioend->io_size);
-			goto done;
-		}
-		error = xfs_reflink_end_cow(ip, ioend->io_offset,
-				ioend->io_size);
-		if (error)
-			goto done;
+		goto done;
 	}
 
 	/*
-	 * For unwritten extents we need to issue transactions to convert a
-	 * range to normal written extens after the data I/O has finished.
-	 * Detecting and handling completion IO errors is done individually
-	 * for each case as different cleanup operations need to be performed
-	 * on error.
+	 * Clean up any COW blocks on an I/O error.
 	 */
-	if (ioend->io_type == XFS_IO_UNWRITTEN) {
-		if (error)
-			goto done;
-		error = xfs_iomap_write_unwritten(ip, ioend->io_offset,
-						  ioend->io_size);
-	} else if (ioend->io_append_trans) {
-		error = xfs_setfilesize_ioend(ioend, error);
-	} else {
-		ASSERT(!xfs_ioend_is_append(ioend) ||
-		       ioend->io_type == XFS_IO_COW);
+	if (unlikely(error)) {
+		switch (ioend->io_type) {
+		case XFS_IO_COW:
+			xfs_reflink_cancel_cow_range(ip, offset, size, true);
+			break;
+		}
+
+		goto done;
+	}
+
+	/*
+	 * Success:  commit the COW or unwritten blocks if needed.
+	 */
+	switch (ioend->io_type) {
+	case XFS_IO_COW:
+		error = xfs_reflink_end_cow(ip, offset, size);
+		break;
+	case XFS_IO_UNWRITTEN:
+		error = xfs_iomap_write_unwritten(ip, offset, size);
+		break;
+	default:
+		ASSERT(!xfs_ioend_is_append(ioend) || ioend->io_append_trans);
+		break;
 	}
 
 done:
+	if (ioend->io_append_trans)
+		error = xfs_setfilesize_ioend(ioend, error);
 	xfs_destroy_ioend(ioend, error);
 }
 
@@ -486,6 +481,12 @@
 	struct xfs_ioend	*ioend,
 	int			status)
 {
+	/* Convert CoW extents to regular */
+	if (!status && ioend->io_type == XFS_IO_COW) {
+		status = xfs_reflink_convert_cow(XFS_I(ioend->io_inode),
+				ioend->io_offset, ioend->io_size);
+	}
+
 	/* Reserve log space if we might write beyond the on-disk inode size. */
 	if (!status &&
 	    ioend->io_type != XFS_IO_UNWRITTEN &&
@@ -1257,44 +1258,6 @@
 	bh_result->b_size = mapping_size;
 }
 
-/* Bounce unaligned directio writes to the page cache. */
-static int
-xfs_bounce_unaligned_dio_write(
-	struct xfs_inode	*ip,
-	xfs_fileoff_t		offset_fsb,
-	struct xfs_bmbt_irec	*imap)
-{
-	struct xfs_bmbt_irec	irec;
-	xfs_fileoff_t		delta;
-	bool			shared;
-	bool			x;
-	int			error;
-
-	irec = *imap;
-	if (offset_fsb > irec.br_startoff) {
-		delta = offset_fsb - irec.br_startoff;
-		irec.br_blockcount -= delta;
-		irec.br_startblock += delta;
-		irec.br_startoff = offset_fsb;
-	}
-	error = xfs_reflink_trim_around_shared(ip, &irec, &shared, &x);
-	if (error)
-		return error;
-
-	/*
-	 * We're here because we're trying to do a directio write to a
-	 * region that isn't aligned to a filesystem block.  If any part
-	 * of the extent is shared, fall back to buffered mode to handle
-	 * the RMW.  This is done by returning -EREMCHG ("remote addr
-	 * changed"), which is caught further up the call stack.
-	 */
-	if (shared) {
-		trace_xfs_reflink_bounce_dio_write(ip, imap);
-		return -EREMCHG;
-	}
-	return 0;
-}
-
 STATIC int
 __xfs_get_blocks(
 	struct inode		*inode,
@@ -1432,13 +1395,6 @@
 	if (imap.br_startblock != HOLESTARTBLOCK &&
 	    imap.br_startblock != DELAYSTARTBLOCK &&
 	    (create || !ISUNWRITTEN(&imap))) {
-		if (create && direct && !is_cow) {
-			error = xfs_bounce_unaligned_dio_write(ip, offset_fsb,
-					&imap);
-			if (error)
-				return error;
-		}
-
 		xfs_map_buffer(inode, bh_result, &imap, offset);
 		if (ISUNWRITTEN(&imap))
 			set_buffer_unwritten(bh_result);
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index efb8ccd..5c395e4 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -917,17 +917,18 @@
  */
 int
 xfs_free_eofblocks(
-	xfs_mount_t	*mp,
-	xfs_inode_t	*ip,
-	bool		need_iolock)
+	struct xfs_inode	*ip)
 {
-	xfs_trans_t	*tp;
-	int		error;
-	xfs_fileoff_t	end_fsb;
-	xfs_fileoff_t	last_fsb;
-	xfs_filblks_t	map_len;
-	int		nimaps;
-	xfs_bmbt_irec_t	imap;
+	struct xfs_trans	*tp;
+	int			error;
+	xfs_fileoff_t		end_fsb;
+	xfs_fileoff_t		last_fsb;
+	xfs_filblks_t		map_len;
+	int			nimaps;
+	struct xfs_bmbt_irec	imap;
+	struct xfs_mount	*mp = ip->i_mount;
+
+	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
 
 	/*
 	 * Figure out if there are any blocks beyond the end
@@ -944,6 +945,10 @@
 	error = xfs_bmapi_read(ip, end_fsb, map_len, &imap, &nimaps, 0);
 	xfs_iunlock(ip, XFS_ILOCK_SHARED);
 
+	/*
+	 * If there are blocks after the end of file, truncate the file to its
+	 * current size to free them up.
+	 */
 	if (!error && (nimaps != 0) &&
 	    (imap.br_startblock != HOLESTARTBLOCK ||
 	     ip->i_delayed_blks)) {
@@ -954,22 +959,13 @@
 		if (error)
 			return error;
 
-		/*
-		 * There are blocks after the end of file.
-		 * Free them up now by truncating the file to
-		 * its current size.
-		 */
-		if (need_iolock) {
-			if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL))
-				return -EAGAIN;
-		}
+		/* wait on dio to ensure i_size has settled */
+		inode_dio_wait(VFS_I(ip));
 
 		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate, 0, 0, 0,
 				&tp);
 		if (error) {
 			ASSERT(XFS_FORCED_SHUTDOWN(mp));
-			if (need_iolock)
-				xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 			return error;
 		}
 
@@ -997,8 +993,6 @@
 		}
 
 		xfs_iunlock(ip, XFS_ILOCK_EXCL);
-		if (need_iolock)
-			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 	}
 	return error;
 }
@@ -1393,10 +1387,16 @@
 	xfs_fileoff_t		stop_fsb;
 	xfs_fileoff_t		next_fsb;
 	xfs_fileoff_t		shift_fsb;
+	uint			resblks;
 
 	ASSERT(direction == SHIFT_LEFT || direction == SHIFT_RIGHT);
 
 	if (direction == SHIFT_LEFT) {
+		/*
+		 * Reserve blocks to cover potential extent merges after left
+		 * shift operations.
+		 */
+		resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
 		next_fsb = XFS_B_TO_FSB(mp, offset + len);
 		stop_fsb = XFS_B_TO_FSB(mp, VFS_I(ip)->i_size);
 	} else {
@@ -1404,6 +1404,7 @@
 		 * If right shift, delegate the work of initialization of
 		 * next_fsb to xfs_bmap_shift_extent as it has ilock held.
 		 */
+		resblks = 0;
 		next_fsb = NULLFSBLOCK;
 		stop_fsb = XFS_B_TO_FSB(mp, offset);
 	}
@@ -1415,7 +1416,7 @@
 	 * into the accessible region of the file.
 	 */
 	if (xfs_can_free_eofblocks(ip, true)) {
-		error = xfs_free_eofblocks(mp, ip, false);
+		error = xfs_free_eofblocks(ip);
 		if (error)
 			return error;
 	}
@@ -1445,21 +1446,14 @@
 	}
 
 	while (!error && !done) {
-		/*
-		 * We would need to reserve permanent block for transaction.
-		 * This will come into picture when after shifting extent into
-		 * hole we found that adjacent extents can be merged which
-		 * may lead to freeing of a block during record update.
-		 */
-		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write,
-				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0, 0, &tp);
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 0,
+					&tp);
 		if (error)
 			break;
 
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 		error = xfs_trans_reserve_quota(tp, mp, ip->i_udquot,
-				ip->i_gdquot, ip->i_pdquot,
-				XFS_DIOSTRAT_SPACE_RES(mp, 0), 0,
+				ip->i_gdquot, ip->i_pdquot, resblks, 0,
 				XFS_QMOPT_RES_REGBLKS);
 		if (error)
 			goto out_trans_cancel;
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 68a621a..f100539 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -63,8 +63,7 @@
 
 /* EOF block manipulation functions */
 bool	xfs_can_free_eofblocks(struct xfs_inode *ip, bool force);
-int	xfs_free_eofblocks(struct xfs_mount *mp, struct xfs_inode *ip,
-			   bool need_iolock);
+int	xfs_free_eofblocks(struct xfs_inode *ip);
 
 int	xfs_swap_extents(struct xfs_inode *ip, struct xfs_inode *tip,
 			 struct xfs_swapext *sx);
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 2975cb2..0306168 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -1162,6 +1162,7 @@
 	 */
 	bp->b_last_error = 0;
 	bp->b_retries = 0;
+	bp->b_first_retry_time = 0;
 
 	xfs_buf_do_callbacks(bp);
 	bp->b_fspriv = NULL;
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index 162dc18..29c2f99 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -45,18 +45,7 @@
 	struct rb_node		**rbp;
 	struct rb_node		*parent = NULL;
 
-	new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_MAYFAIL);
-	if (!new) {
-		/*
-		 * No Memory!  Since it is now not possible to track the free
-		 * block, make this a synchronous transaction to insure that
-		 * the block is not reused before this transaction commits.
-		 */
-		trace_xfs_extent_busy_enomem(tp->t_mountp, agno, bno, len);
-		xfs_trans_set_sync(tp);
-		return;
-	}
-
+	new = kmem_zalloc(sizeof(struct xfs_extent_busy), KM_SLEEP);
 	new->agno = agno;
 	new->bno = bno;
 	new->length = len;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 9a5d64b..1209ad2 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -554,6 +554,15 @@
 	if ((iocb->ki_pos & mp->m_blockmask) ||
 	    ((iocb->ki_pos + count) & mp->m_blockmask)) {
 		unaligned_io = 1;
+
+		/*
+		 * We can't properly handle unaligned direct I/O to reflink
+		 * files yet, as we can't unshare a partial block.
+		 */
+		if (xfs_is_reflink_inode(ip)) {
+			trace_xfs_reflink_bounce_dio_write(ip, iocb->ki_pos, count);
+			return -EREMCHG;
+		}
 		iolock = XFS_IOLOCK_EXCL;
 	} else {
 		iolock = XFS_IOLOCK_SHARED;
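
The check added above classifies a direct write as unaligned when either endpoint lands inside a filesystem block (tested with blocksize - 1 as a mask) and bounces reflinked files back to buffered I/O with -EREMCHG. A small userspace sketch of the test; the block size and the helper are illustrative.

#include <stdio.h>
#include <stdint.h>

#define EREMCHG 78	/* "remote address changed": bounce to buffered I/O */

static int check_dio_alignment(uint64_t pos, uint64_t count,
			       uint64_t blockmask, int is_reflink)
{
	if ((pos & blockmask) || ((pos + count) & blockmask)) {
		/*
		 * Unaligned: a reflinked file would need a sub-block
		 * read-modify-write we can't unshare, so bounce it.
		 */
		if (is_reflink)
			return -EREMCHG;
	}
	return 0;
}

int main(void)
{
	uint64_t mask = 4096 - 1;

	printf("%d\n", check_dio_alignment(4096, 8192, mask, 1)); /* 0: aligned */
	printf("%d\n", check_dio_alignment(512, 4096, mask, 1));  /* -78: bounced */
	return 0;
}
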
@@ -675,8 +684,10 @@
 	struct xfs_inode	*ip = XFS_I(inode);
 	ssize_t			ret;
 	int			enospc = 0;
-	int			iolock = XFS_IOLOCK_EXCL;
+	int			iolock;
 
+write_retry:
+	iolock = XFS_IOLOCK_EXCL;
 	xfs_rw_ilock(ip, iolock);
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
@@ -686,7 +697,6 @@
 	/* We can write back this queue in page reclaim */
 	current->backing_dev_info = inode_to_bdi(inode);
 
-write_retry:
 	trace_xfs_file_buffered_write(ip, iov_iter_count(from), iocb->ki_pos);
 	ret = iomap_file_buffered_write(iocb, from, &xfs_iomap_ops);
 	if (likely(ret >= 0))
@@ -702,18 +712,21 @@
 	 * running at the same time.
 	 */
 	if (ret == -EDQUOT && !enospc) {
+		xfs_rw_iunlock(ip, iolock);
 		enospc = xfs_inode_free_quota_eofblocks(ip);
 		if (enospc)
 			goto write_retry;
 		enospc = xfs_inode_free_quota_cowblocks(ip);
 		if (enospc)
 			goto write_retry;
+		iolock = 0;
 	} else if (ret == -ENOSPC && !enospc) {
 		struct xfs_eofblocks eofb = {0};
 
 		enospc = 1;
 		xfs_flush_inodes(ip->i_mount);
-		eofb.eof_scan_owner = ip->i_ino; /* for locking */
+
+		xfs_rw_iunlock(ip, iolock);
 		eofb.eof_flags = XFS_EOF_FLAGS_SYNC;
 		xfs_icache_free_eofblocks(ip->i_mount, &eofb);
 		goto write_retry;
@@ -721,7 +734,8 @@
 
 	current->backing_dev_info = NULL;
 out:
-	xfs_rw_iunlock(ip, iolock);
+	if (iolock)
+		xfs_rw_iunlock(ip, iolock);
 	return ret;
 }
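
The hunks above restructure the buffered-write ENOSPC path so the iolock is dropped before the blocking eofblocks scan, and the whole write, including the lock, is retried from the top. A userspace model of that drop-lock, scan, retry flow; the lock and scan bodies are stand-ins, not the kernel API.

#include <stdio.h>
#include <errno.h>

static int space_available;

static void lock(void)   { /* take the iolock */ }
static void unlock(void) { /* drop the iolock */ }

static int try_write(void)
{
	return space_available ? 0 : -ENOSPC;
}

static void free_eofblocks_scan(void)
{
	/* the scan takes the iolock itself, hence the unlock before it */
	space_available = 1;
}

static int buffered_write(void)
{
	int ret, retried = 0;

write_retry:
	lock();
	ret = try_write();
	if (ret == -ENOSPC && !retried) {
		retried = 1;
		unlock();		/* avoid deadlocking against the scan */
		free_eofblocks_scan();
		goto write_retry;	/* restart, re-taking the lock */
	}
	unlock();
	return ret;
}

int main(void)
{
	printf("write returned %d\n", buffered_write());  /* 0 after one retry */
	return 0;
}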
 
@@ -987,9 +1001,9 @@
 	 */
 	mode = xfs_ilock_data_map_shared(ip);
 	if (ip->i_d.di_nextents > 0)
-		xfs_dir3_data_readahead(ip, 0, -1);
+		error = xfs_dir3_data_readahead(ip, 0, -1);
 	xfs_iunlock(ip, mode);
-	return 0;
+	return error;
 }
 
 STATIC int
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index 29cc988..3fb1f3f 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -1324,13 +1324,10 @@
 	int			flags,
 	void			*args)
 {
-	int ret;
+	int ret = 0;
 	struct xfs_eofblocks *eofb = args;
-	bool need_iolock = true;
 	int match;
 
-	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
-
 	if (!xfs_can_free_eofblocks(ip, false)) {
 		/* inode could be preallocated or append-only */
 		trace_xfs_inode_free_eofblocks_invalid(ip);
@@ -1358,21 +1355,19 @@
 		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
 		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
 			return 0;
-
-		/*
-		 * A scan owner implies we already hold the iolock. Skip it in
-		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
-		 * the possibility of EAGAIN being returned.
-		 */
-		if (eofb->eof_scan_owner == ip->i_ino)
-			need_iolock = false;
 	}
 
-	ret = xfs_free_eofblocks(ip->i_mount, ip, need_iolock);
-
-	/* don't revisit the inode if we're not waiting */
-	if (ret == -EAGAIN && !(flags & SYNC_WAIT))
-		ret = 0;
+	/*
+	 * If the caller is waiting, return -EAGAIN to keep the background
+	 * scanner moving and revisit the inode in a subsequent pass.
+	 */
+	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+		if (flags & SYNC_WAIT)
+			ret = -EAGAIN;
+		return ret;
+	}
+	ret = xfs_free_eofblocks(ip);
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 
 	return ret;
 }
@@ -1419,15 +1414,10 @@
 	struct xfs_eofblocks eofb = {0};
 	struct xfs_dquot *dq;
 
-	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
-
 	/*
-	 * Set the scan owner to avoid a potential livelock. Otherwise, the scan
-	 * can repeatedly trylock on the inode we're currently processing. We
-	 * run a sync scan to increase effectiveness and use the union filter to
+	 * Run a sync scan to increase effectiveness and use the union filter to
 	 * cover all applicable quotas in a single scan.
 	 */
-	eofb.eof_scan_owner = ip->i_ino;
 	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;
 
 	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
@@ -1579,12 +1569,9 @@
 {
 	int ret;
 	struct xfs_eofblocks *eofb = args;
-	bool need_iolock = true;
 	int match;
 	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
 
-	ASSERT(!eofb || (eofb && eofb->eof_scan_owner != 0));
-
 	/*
 	 * Just clear the tag if we have an empty cow fork or none at all. It's
 	 * possible the inode was fully unshared since it was originally tagged.
@@ -1617,28 +1604,16 @@
 		if (eofb->eof_flags & XFS_EOF_FLAGS_MINFILESIZE &&
 		    XFS_ISIZE(ip) < eofb->eof_min_file_size)
 			return 0;
-
-		/*
-		 * A scan owner implies we already hold the iolock. Skip it in
-		 * xfs_free_eofblocks() to avoid deadlock. This also eliminates
-		 * the possibility of EAGAIN being returned.
-		 */
-		if (eofb->eof_scan_owner == ip->i_ino)
-			need_iolock = false;
 	}
 
 	/* Free the CoW blocks */
-	if (need_iolock) {
-		xfs_ilock(ip, XFS_IOLOCK_EXCL);
-		xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
-	}
+	xfs_ilock(ip, XFS_IOLOCK_EXCL);
+	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
 
-	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+	ret = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, false);
 
-	if (need_iolock) {
-		xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
-		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
-	}
+	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
+	xfs_iunlock(ip, XFS_IOLOCK_EXCL);
 
 	return ret;
 }
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index a1e02f4..8a7c849 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -27,7 +27,6 @@
 	kgid_t		eof_gid;
 	prid_t		eof_prid;
 	__u64		eof_min_file_size;
-	xfs_ino_t	eof_scan_owner;
 };
 
 #define SYNC_WAIT		0x0001	/* wait for i/o to complete */
@@ -102,7 +101,6 @@
 	dst->eof_flags = src->eof_flags;
 	dst->eof_prid = src->eof_prid;
 	dst->eof_min_file_size = src->eof_min_file_size;
-	dst->eof_scan_owner = NULLFSINO;
 
 	dst->eof_uid = INVALID_UID;
 	if (src->eof_flags & XFS_EOF_FLAGS_UID) {
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 512ff13..e50636c 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1624,7 +1624,7 @@
 
 	/* Remove all pending CoW reservations. */
 	error = xfs_reflink_cancel_cow_blocks(ip, &tp, first_unmap_block,
-			last_block);
+			last_block, true);
 	if (error)
 		goto out;
 
@@ -1701,32 +1701,34 @@
 	if (xfs_can_free_eofblocks(ip, false)) {
 
 		/*
-		 * If we can't get the iolock just skip truncating the blocks
-		 * past EOF because we could deadlock with the mmap_sem
-		 * otherwise.  We'll get another chance to drop them once the
-		 * last reference to the inode is dropped, so we'll never leak
-		 * blocks permanently.
+		 * If the inode is being opened, written and closed
+		 * frequently and we have delayed allocation blocks outstanding
+		 * (e.g. streaming writes from the NFS server), truncating the
+		 * blocks past EOF will cause fragmentation to occur.
 		 *
-		 * Further, check if the inode is being opened, written and
-		 * closed frequently and we have delayed allocation blocks
-		 * outstanding (e.g. streaming writes from the NFS server),
-		 * truncating the blocks past EOF will cause fragmentation to
-		 * occur.
-		 *
-		 * In this case don't do the truncation, either, but we have to
-		 * be careful how we detect this case. Blocks beyond EOF show
-		 * up as i_delayed_blks even when the inode is clean, so we
-		 * need to truncate them away first before checking for a dirty
-		 * release. Hence on the first dirty close we will still remove
-		 * the speculative allocation, but after that we will leave it
-		 * in place.
+		 * In this case don't do the truncation, but we have to be
+		 * careful how we detect this case. Blocks beyond EOF show up as
+		 * i_delayed_blks even when the inode is clean, so we need to
+		 * truncate them away first before checking for a dirty release.
+		 * Hence on the first dirty close we will still remove the
+		 * speculative allocation, but after that we will leave it in
+		 * place.
 		 */
 		if (xfs_iflags_test(ip, XFS_IDIRTY_RELEASE))
 			return 0;
-
-		error = xfs_free_eofblocks(mp, ip, true);
-		if (error && error != -EAGAIN)
-			return error;
+		/*
+		 * If we can't get the iolock just skip truncating the blocks
+		 * past EOF because we could deadlock with the mmap_sem
+		 * otherwise. We'll get another chance to drop them once the
+		 * last reference to the inode is dropped, so we'll never leak
+		 * blocks permanently.
+		 */
+		if (xfs_ilock_nowait(ip, XFS_IOLOCK_EXCL)) {
+			error = xfs_free_eofblocks(ip);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+			if (error)
+				return error;
+		}
 
 		/* delalloc blocks after truncation means it really is dirty */
 		if (ip->i_delayed_blks)
@@ -1801,22 +1803,23 @@
 	int			error;
 
 	/*
-	 * The ifree transaction might need to allocate blocks for record
-	 * insertion to the finobt. We don't want to fail here at ENOSPC, so
-	 * allow ifree to dip into the reserved block pool if necessary.
-	 *
-	 * Freeing large sets of inodes generally means freeing inode chunks,
-	 * directory and file data blocks, so this should be relatively safe.
-	 * Only under severe circumstances should it be possible to free enough
-	 * inodes to exhaust the reserve block pool via finobt expansion while
-	 * at the same time not creating free space in the filesystem.
+	 * We try to use a per-AG reservation for any block needed by the finobt
+	 * tree, but as the finobt feature predates the per-AG reservation
+	 * support a degraded file system might not have enough space for the
+	 * reservation at mount time.  In that case try to dip into the reserved
+	 * pool and pray.
 	 *
 	 * Send a warning if the reservation does happen to fail, as the inode
 	 * now remains allocated and sits on the unlinked list until the fs is
 	 * repaired.
 	 */
-	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
-			XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+	if (unlikely(mp->m_inotbt_nores)) {
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+				XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
+				&tp);
+	} else {
+		error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
+	}
 	if (error) {
 		if (error == -ENOSPC) {
 			xfs_warn_ratelimited(mp,
@@ -1912,8 +1915,11 @@
 		 * cache. Post-eof blocks must be freed, lest we end up with
 		 * broken free space accounting.
 		 */
-		if (xfs_can_free_eofblocks(ip, true))
-			xfs_free_eofblocks(mp, ip, false);
+		if (xfs_can_free_eofblocks(ip, true)) {
+			xfs_ilock(ip, XFS_IOLOCK_EXCL);
+			xfs_free_eofblocks(ip);
+			xfs_iunlock(ip, XFS_IOLOCK_EXCL);
+		}
 
 		return;
 	}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index e888961..3605624 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -637,6 +637,11 @@
 		goto out_unlock;
 	}
 
+	/*
+	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
+	 * them out if the write happens to fail.
+	 */
+	iomap->flags = IOMAP_F_NEW;
 	trace_xfs_iomap_alloc(ip, offset, count, 0, &got);
 done:
 	if (isnullstartblock(got.br_startblock))
@@ -685,7 +690,7 @@
 	int		nres;
 
 	if (whichfork == XFS_COW_FORK)
-		flags |= XFS_BMAPI_COWFORK;
+		flags |= XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC;
 
 	/*
 	 * Make sure that the dquots are there.
@@ -1061,7 +1066,8 @@
 	struct xfs_inode	*ip,
 	loff_t			offset,
 	loff_t			length,
-	ssize_t			written)
+	ssize_t			written,
+	struct iomap		*iomap)
 {
 	struct xfs_mount	*mp = ip->i_mount;
 	xfs_fileoff_t		start_fsb;
@@ -1080,14 +1086,14 @@
 	end_fsb = XFS_B_TO_FSB(mp, offset + length);
 
 	/*
-	 * Trim back delalloc blocks if we didn't manage to write the whole
-	 * range reserved.
+	 * Trim delalloc blocks if they were allocated by this write and we
+	 * didn't manage to write the whole range.
 	 *
 	 * We don't need to care about racing delalloc as we hold i_mutex
 	 * across the reserve/allocate/unreserve calls. If there are delalloc
 	 * blocks in the range, they are ours.
 	 */
-	if (start_fsb < end_fsb) {
+	if ((iomap->flags & IOMAP_F_NEW) && start_fsb < end_fsb) {
 		truncate_pagecache_range(VFS_I(ip), XFS_FSB_TO_B(mp, start_fsb),
 					 XFS_FSB_TO_B(mp, end_fsb) - 1);
 
@@ -1117,7 +1123,7 @@
 {
 	if ((flags & IOMAP_WRITE) && iomap->type == IOMAP_DELALLOC)
 		return xfs_file_iomap_end_delalloc(XFS_I(inode), offset,
-				length, written);
+				length, written, iomap);
 	return 0;
 }
 
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index b341f10..13796f2 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -502,8 +502,7 @@
 xfs_set_inoalignment(xfs_mount_t *mp)
 {
 	if (xfs_sb_version_hasalign(&mp->m_sb) &&
-	    mp->m_sb.sb_inoalignmt >=
-	    XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
+		mp->m_sb.sb_inoalignmt >= xfs_icluster_size_fsb(mp))
 		mp->m_inoalign_mask = mp->m_sb.sb_inoalignmt - 1;
 	else
 		mp->m_inoalign_mask = 0;
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 819b80b..1bf878b 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -140,6 +140,7 @@
 	int			m_fixedfsid[2];	/* unchanged for life of FS */
 	uint			m_dmevmask;	/* DMI events for this FS */
 	__uint64_t		m_flags;	/* global mount flags */
+	bool			m_inotbt_nores; /* no per-AG finobt resv. */
 	int			m_ialloc_inos;	/* inodes in inode allocation */
 	int			m_ialloc_blks;	/* blocks in inode allocation */
 	int			m_ialloc_min_blks;/* min blocks in sparse inode
diff --git a/fs/xfs/xfs_reflink.c b/fs/xfs/xfs_reflink.c
index 4d3f74e..2252f16 100644
--- a/fs/xfs/xfs_reflink.c
+++ b/fs/xfs/xfs_reflink.c
@@ -82,11 +82,22 @@
  * mappings are a reservation against the free space in the filesystem;
  * adjacent mappings can also be combined into fewer larger mappings.
  *
+ * As an optimization, the CoW extent size hint (cowextsz) creates
+ * outsized aligned delalloc reservations in the hope of landing out of
+ * order nearby CoW writes in a single extent on disk, thereby reducing
+ * fragmentation and improving future performance.
+ *
+ * D: --RRRRRRSSSRRRRRRRR--- (data fork)
+ * C: ------DDDDDDD--------- (CoW fork)
+ *
  * When dirty pages are being written out (typically in writepage), the
- * delalloc reservations are converted into real mappings by allocating
- * blocks and replacing the delalloc mapping with real ones.  A delalloc
- * mapping can be replaced by several real ones if the free space is
- * fragmented.
+ * delalloc reservations are converted into unwritten mappings by
+ * allocating blocks and replacing the delalloc mapping with real ones.
+ * A delalloc mapping can be replaced by several unwritten ones if the
+ * free space is fragmented.
+ *
+ * D: --RRRRRRSSSRRRRRRRR---
+ * C: ------UUUUUUU---------
  *
  * We want to adapt the delalloc mechanism for copy-on-write, since the
  * write paths are similar.  The first two steps (creating the reservation
@@ -101,13 +112,29 @@
  * Block-aligned directio writes will use the same mechanism as buffered
  * writes.
  *
+ * Just prior to submitting the actual disk write requests, we convert
+ * the extents representing the range of the file actually being written
+ * (as opposed to extra pieces created for the cowextsize hint) to real
+ * extents.  This will become important in the next step:
+ *
+ * D: --RRRRRRSSSRRRRRRRR---
+ * C: ------UUrrUUU---------
+ *
  * CoW remapping must be done after the data block write completes,
  * because we don't want to destroy the old data fork map until we're sure
  * the new block has been written.  Since the new mappings are kept in a
  * separate fork, we can simply iterate these mappings to find the ones
  * that cover the file blocks that we just CoW'd.  For each extent, simply
  * unmap the corresponding range in the data fork, map the new range into
- * the data fork, and remove the extent from the CoW fork.
+ * the data fork, and remove the extent from the CoW fork.  Because of
+ * the presence of the cowextsize hint, however, we must be careful
+ * only to remap the blocks that we've actually written out -- we must
+ * never remap delalloc reservations nor CoW staging blocks that have
+ * yet to be written.  This corresponds exactly to the real extents in
+ * the CoW fork:
+ *
+ * D: --RRRRRRrrSRRRRRRRR---
+ * C: ------UU--UUU---------
  *
  * Since the remapping operation can be applied to an arbitrary file
  * range, we record the need for the remap step as a flag in the ioend
@@ -296,6 +323,65 @@
 	return 0;
 }
 
+/* Convert part of an unwritten CoW extent to a real one. */
+STATIC int
+xfs_reflink_convert_cow_extent(
+	struct xfs_inode		*ip,
+	struct xfs_bmbt_irec		*imap,
+	xfs_fileoff_t			offset_fsb,
+	xfs_filblks_t			count_fsb,
+	struct xfs_defer_ops		*dfops)
+{
+	struct xfs_bmbt_irec		irec = *imap;
+	xfs_fsblock_t			first_block;
+	int				nimaps = 1;
+
+	if (imap->br_state == XFS_EXT_NORM)
+		return 0;
+
+	xfs_trim_extent(&irec, offset_fsb, count_fsb);
+	trace_xfs_reflink_convert_cow(ip, &irec);
+	if (irec.br_blockcount == 0)
+		return 0;
+	return xfs_bmapi_write(NULL, ip, irec.br_startoff, irec.br_blockcount,
+			XFS_BMAPI_COWFORK | XFS_BMAPI_CONVERT, &first_block,
+			0, &irec, &nimaps, dfops);
+}
+
+/* Convert all of the unwritten CoW extents in a file's range to real ones. */
+int
+xfs_reflink_convert_cow(
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_off_t		count)
+{
+	struct xfs_bmbt_irec	got;
+	struct xfs_defer_ops	dfops;
+	struct xfs_mount	*mp = ip->i_mount;
+	struct xfs_ifork	*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
+	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
+	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + count);
+	xfs_extnum_t		idx;
+	bool			found;
+	int			error = 0;
+
+	xfs_ilock(ip, XFS_ILOCK_EXCL);
+
+	/* Convert all the extents to real from unwritten. */
+	for (found = xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got);
+	     found && got.br_startoff < end_fsb;
+	     found = xfs_iext_get_extent(ifp, ++idx, &got)) {
+		error = xfs_reflink_convert_cow_extent(ip, &got, offset_fsb,
+				end_fsb - offset_fsb, &dfops);
+		if (error)
+			break;
+	}
+
+	/* Finish up. */
+	xfs_iunlock(ip, XFS_ILOCK_EXCL);
+	return error;
+}
+
 /* Allocate all CoW reservations covering a range of blocks in a file. */
 static int
 __xfs_reflink_allocate_cow(
@@ -328,6 +414,7 @@
 		goto out_unlock;
 	ASSERT(nimaps == 1);
 
+	/* Make sure there's a CoW reservation for it. */
 	error = xfs_reflink_reserve_cow(ip, &imap, &shared);
 	if (error)
 		goto out_trans_cancel;
@@ -337,14 +424,16 @@
 		goto out_trans_cancel;
 	}
 
+	/* Allocate the entire reservation as unwritten blocks. */
 	xfs_trans_ijoin(tp, ip, 0);
 	error = xfs_bmapi_write(tp, ip, imap.br_startoff, imap.br_blockcount,
-			XFS_BMAPI_COWFORK, &first_block,
+			XFS_BMAPI_COWFORK | XFS_BMAPI_PREALLOC, &first_block,
 			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK),
 			&imap, &nimaps, &dfops);
 	if (error)
 		goto out_trans_cancel;
 
+	/* Finish up. */
 	error = xfs_defer_finish(&tp, &dfops, NULL);
 	if (error)
 		goto out_trans_cancel;
@@ -389,11 +478,12 @@
 		if (error) {
 			trace_xfs_reflink_allocate_cow_range_error(ip, error,
 					_RET_IP_);
-			break;
+			return error;
 		}
 	}
 
-	return error;
+	/* Convert the CoW extents to regular. */
+	return xfs_reflink_convert_cow(ip, offset, count);
 }
 
 /*
@@ -481,14 +571,18 @@
 }
 
 /*
- * Cancel all pending CoW reservations for some block range of an inode.
+ * Cancel CoW reservations for some block range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_blocks(
 	struct xfs_inode		*ip,
 	struct xfs_trans		**tpp,
 	xfs_fileoff_t			offset_fsb,
-	xfs_fileoff_t			end_fsb)
+	xfs_fileoff_t			end_fsb,
+	bool				cancel_real)
 {
 	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
 	struct xfs_bmbt_irec		got, prev, del;
@@ -515,7 +609,7 @@
 					&idx, &got, &del);
 			if (error)
 				break;
-		} else {
+		} else if (del.br_state == XFS_EXT_UNWRITTEN || cancel_real) {
 			xfs_trans_ijoin(*tpp, ip, 0);
 			xfs_defer_init(&dfops, &firstfsb);
 
@@ -558,13 +652,17 @@
 }
 
 /*
- * Cancel all pending CoW reservations for some byte range of an inode.
+ * Cancel CoW reservations for some byte range of an inode.
+ *
+ * If cancel_real is true this function cancels all COW fork extents for the
+ * inode; if cancel_real is false, real extents are not cleared.
  */
 int
 xfs_reflink_cancel_cow_range(
 	struct xfs_inode	*ip,
 	xfs_off_t		offset,
-	xfs_off_t		count)
+	xfs_off_t		count,
+	bool			cancel_real)
 {
 	struct xfs_trans	*tp;
 	xfs_fileoff_t		offset_fsb;
@@ -590,7 +688,8 @@
 	xfs_trans_ijoin(tp, ip, 0);
 
 	/* Scrape out the old CoW reservations */
-	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb);
+	error = xfs_reflink_cancel_cow_blocks(ip, &tp, offset_fsb, end_fsb,
+			cancel_real);
 	if (error)
 		goto out_cancel;
 
@@ -669,6 +768,16 @@
 
 		ASSERT(!isnullstartblock(got.br_startblock));
 
+		/*
+		 * Don't remap unwritten extents; these are
+		 * speculatively preallocated CoW extents that have been
+		 * allocated but have not yet been involved in a write.
+		 */
+		if (got.br_state == XFS_EXT_UNWRITTEN) {
+			idx--;
+			goto next_extent;
+		}
+
 		/* Unmap the old blocks in the data fork. */
 		xfs_defer_init(&dfops, &firstfsb);
 		rlen = del.br_blockcount;
@@ -885,13 +994,14 @@
 xfs_reflink_update_dest(
 	struct xfs_inode	*dest,
 	xfs_off_t		newlen,
-	xfs_extlen_t		cowextsize)
+	xfs_extlen_t		cowextsize,
+	bool			is_dedupe)
 {
 	struct xfs_mount	*mp = dest->i_mount;
 	struct xfs_trans	*tp;
 	int			error;
 
-	if (newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
+	if (is_dedupe && newlen <= i_size_read(VFS_I(dest)) && cowextsize == 0)
 		return 0;
 
 	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ichange, 0, 0, 0, &tp);
@@ -912,6 +1022,10 @@
 		dest->i_d.di_flags2 |= XFS_DIFLAG2_COWEXTSIZE;
 	}
 
+	if (!is_dedupe) {
+		xfs_trans_ichgtime(tp, dest,
+				   XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
+	}
 	xfs_trans_log_inode(tp, dest, XFS_ILOG_CORE);
 
 	error = xfs_trans_commit(tp);
@@ -1428,7 +1542,8 @@
 	    !(dest->i_d.di_flags2 & XFS_DIFLAG2_COWEXTSIZE))
 		cowextsize = src->i_d.di_cowextsize;
 
-	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize);
+	ret = xfs_reflink_update_dest(dest, pos_out + len, cowextsize,
+			is_dedupe);
 
 out_unlock:
 	xfs_iunlock(src, XFS_MMAPLOCK_EXCL);
@@ -1580,7 +1695,7 @@
 	 * We didn't find any shared blocks so turn off the reflink flag.
 	 * First, get rid of any leftover CoW mappings.
 	 */
-	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF);
+	error = xfs_reflink_cancel_cow_blocks(ip, tpp, 0, NULLFILEOFF, true);
 	if (error)
 		return error;
 
diff --git a/fs/xfs/xfs_reflink.h b/fs/xfs/xfs_reflink.h
index 97ea9b4..a57966f 100644
--- a/fs/xfs/xfs_reflink.h
+++ b/fs/xfs/xfs_reflink.h
@@ -30,6 +30,8 @@
 		struct xfs_bmbt_irec *imap, bool *shared);
 extern int xfs_reflink_allocate_cow_range(struct xfs_inode *ip,
 		xfs_off_t offset, xfs_off_t count);
+extern int xfs_reflink_convert_cow(struct xfs_inode *ip, xfs_off_t offset,
+		xfs_off_t count);
 extern bool xfs_reflink_find_cow_mapping(struct xfs_inode *ip, xfs_off_t offset,
 		struct xfs_bmbt_irec *imap, bool *need_alloc);
 extern int xfs_reflink_trim_irec_to_next_cow(struct xfs_inode *ip,
@@ -37,9 +39,9 @@
 
 extern int xfs_reflink_cancel_cow_blocks(struct xfs_inode *ip,
 		struct xfs_trans **tpp, xfs_fileoff_t offset_fsb,
-		xfs_fileoff_t end_fsb);
+		xfs_fileoff_t end_fsb, bool cancel_real);
 extern int xfs_reflink_cancel_cow_range(struct xfs_inode *ip, xfs_off_t offset,
-		xfs_off_t count);
+		xfs_off_t count, bool cancel_real);
 extern int xfs_reflink_end_cow(struct xfs_inode *ip, xfs_off_t offset,
 		xfs_off_t count);
 extern int xfs_reflink_recover_cow(struct xfs_mount *mp);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index ade4691..dbbd3f1 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -948,7 +948,7 @@
 	XFS_STATS_INC(ip->i_mount, vn_remove);
 
 	if (xfs_is_reflink_inode(ip)) {
-		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF);
+		error = xfs_reflink_cancel_cow_range(ip, 0, NULLFILEOFF, true);
 		if (error && !XFS_FORCED_SHUTDOWN(ip->i_mount))
 			xfs_warn(ip->i_mount,
 "Error %d while evicting CoW blocks for inode %llu.",
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 0907752..828f383 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -3183,6 +3183,7 @@
 		__field(xfs_fileoff_t, lblk)
 		__field(xfs_extlen_t, len)
 		__field(xfs_fsblock_t, pblk)
+		__field(int, state)
 	),
 	TP_fast_assign(
 		__entry->dev = VFS_I(ip)->i_sb->s_dev;
@@ -3190,13 +3191,15 @@
 		__entry->lblk = irec->br_startoff;
 		__entry->len = irec->br_blockcount;
 		__entry->pblk = irec->br_startblock;
+		__entry->state = irec->br_state;
 	),
-	TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x pblk %llu",
+	TP_printk("dev %d:%d ino 0x%llx lblk 0x%llx len 0x%x pblk %llu st %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev),
 		  __entry->ino,
 		  __entry->lblk,
 		  __entry->len,
-		  __entry->pblk)
+		  __entry->pblk,
+		  __entry->state)
 );
 #define DEFINE_INODE_IREC_EVENT(name) \
 DEFINE_EVENT(xfs_inode_irec_class, name, \
@@ -3345,11 +3348,12 @@
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_alloc);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_found);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_cow_enospc);
+DEFINE_INODE_IREC_EVENT(xfs_reflink_convert_cow);
 
 DEFINE_RW_EVENT(xfs_reflink_reserve_cow);
 DEFINE_RW_EVENT(xfs_reflink_allocate_cow_range);
 
-DEFINE_INODE_IREC_EVENT(xfs_reflink_bounce_dio_write);
+DEFINE_SIMPLE_IO_EVENT(xfs_reflink_bounce_dio_write);
 DEFINE_IOMAP_EVENT(xfs_reflink_find_cow_mapping);
 DEFINE_INODE_IREC_EVENT(xfs_reflink_trim_irec);
 
diff --git a/include/crypto/ice.h b/include/crypto/ice.h
new file mode 100644
index 0000000..558d136
--- /dev/null
+++ b/include/crypto/ice.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_H_
+
+#include <linux/platform_device.h>
+
+struct request;
+
+enum ice_cryto_algo_mode {
+	ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0,
+	ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3,
+};
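+/*
+ * Note: the non-contiguous values here and in the enums below presumably
+ * mirror the ICE HW register encodings.
+ */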
+
+enum ice_crpto_key_size {
+	ICE_CRYPTO_KEY_SIZE_128 = 0x0,
+	ICE_CRYPTO_KEY_SIZE_256 = 0x2,
+};
+
+enum ice_crpto_key_mode {
+	ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0,
+	ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1,
+	ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2,
+	ICE_CRYPTO_USE_LUT_SW_KEY  = 0x3
+};
+
+struct ice_crypto_setting {
+	enum ice_crpto_key_size		key_size;
+	enum ice_cryto_algo_mode	algo_mode;
+	enum ice_crpto_key_mode		key_mode;
+	short				key_index;
+};
+
+struct ice_data_setting {
+	struct ice_crypto_setting	crypto_data;
+	bool				sw_forced_context_switch;
+	bool				decr_bypass;
+	bool				encr_bypass;
+};
+
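+/*
+ * Error callback registered through the init() op below; the first
+ * argument is assumed to be the client cookie passed at init time.
+ */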
+typedef void (*ice_error_cb)(void *, u32 error);
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
+
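+/*
+ * When the ICE driver is not built in, provide a no-op stub so that
+ * callers still compile and link.
+ */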
+#ifdef CONFIG_CRYPTO_DEV_QCOM_ICE
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
+#else
+static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	return 0;
+}
+#endif
+
+struct qcom_ice_variant_ops {
+	const char *name;
+	int	(*init)(struct platform_device *, void *, ice_error_cb);
+	int	(*reset)(struct platform_device *);
+	int	(*resume)(struct platform_device *);
+	int	(*suspend)(struct platform_device *);
+	int	(*config_start)(struct platform_device *, struct request *,
+				struct ice_data_setting *, bool);
+	int	(*config_end)(struct request *);
+	int	(*status)(struct platform_device *);
+	void	(*debug)(struct platform_device *);
+};
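+
+/*
+ * A likely (not contractually specified) call flow for a storage host:
+ * init() once at probe, config_start()/config_end() around each request
+ * that needs a crypto configuration, suspend()/resume() from the host's
+ * PM callbacks, and status()/debug() for diagnostics.
+ */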
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */
diff --git a/include/dt-bindings/clock/mdss-10nm-pll-clk.h b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
new file mode 100644
index 0000000..c1350ce
--- /dev/null
+++ b/include/dt-bindings/clock/mdss-10nm-pll-clk.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MDSS_10NM_PLL_CLK_H
+#define __MDSS_10NM_PLL_CLK_H
+
+/* DSI PLL clocks */
+#define VCO_CLK_0		0
+#define BITCLK_SRC_0_CLK	1
+#define BYTECLK_SRC_0_CLK	2
+#define POST_BIT_DIV_0_CLK	3
+#define POST_VCO_DIV_0_CLK	4
+#define BYTECLK_MUX_0_CLK	5
+#define PCLK_SRC_MUX_0_CLK	6
+#define PCLK_SRC_0_CLK		7
+#define PCLK_MUX_0_CLK		8
+#define VCO_CLK_1		9
+#define BITCLK_SRC_1_CLK	10
+#define BYTECLK_SRC_1_CLK	11
+#define POST_BIT_DIV_1_CLK	12
+#define POST_VCO_DIV_1_CLK	13
+#define BYTECLK_MUX_1_CLK	14
+#define PCLK_SRC_MUX_1_CLK	15
+#define PCLK_SRC_1_CLK		16
+#define PCLK_MUX_1_CLK		17
+#endif /* __MDSS_10NM_PLL_CLK_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index d52e335..96461d4 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -86,116 +86,111 @@
 #define GCC_QMIP_CAMERA_AHB_CLK					68
 #define GCC_QMIP_DISP_AHB_CLK					69
 #define GCC_QMIP_VIDEO_AHB_CLK					70
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				71
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				72
-#define GCC_QUPV3_WRAP0_CORE_CLK				73
-#define GCC_QUPV3_WRAP0_S0_CLK					74
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				75
-#define GCC_QUPV3_WRAP0_S1_CLK					76
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				77
-#define GCC_QUPV3_WRAP0_S2_CLK					78
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				79
-#define GCC_QUPV3_WRAP0_S3_CLK					80
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				81
-#define GCC_QUPV3_WRAP0_S4_CLK					82
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				83
-#define GCC_QUPV3_WRAP0_S5_CLK					84
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				85
-#define GCC_QUPV3_WRAP0_S6_CLK					86
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				87
-#define GCC_QUPV3_WRAP0_S7_CLK					88
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				89
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK				90
-#define GCC_QUPV3_WRAP1_CORE_CLK				91
-#define GCC_QUPV3_WRAP1_S0_CLK					92
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
-#define GCC_QUPV3_WRAP1_S1_CLK					94
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
-#define GCC_QUPV3_WRAP1_S2_CLK					96
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
-#define GCC_QUPV3_WRAP1_S3_CLK					98
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
-#define GCC_QUPV3_WRAP1_S4_CLK					100
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
-#define GCC_QUPV3_WRAP1_S5_CLK					102
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
-#define GCC_QUPV3_WRAP1_S6_CLK					104
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
-#define GCC_QUPV3_WRAP1_S7_CLK					106
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
-#define GCC_RX1_USB2_CLKREF_CLK					112
-#define GCC_RX2_QLINK_CLKREF_CLK				113
-#define GCC_RX3_MODEM_CLKREF_CLK				114
-#define GCC_SDCC2_AHB_CLK					115
-#define GCC_SDCC2_APPS_CLK					116
-#define GCC_SDCC2_APPS_CLK_SRC					117
-#define GCC_SDCC4_AHB_CLK					118
-#define GCC_SDCC4_APPS_CLK					119
-#define GCC_SDCC4_APPS_CLK_SRC					120
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				121
-#define GCC_TSIF_AHB_CLK					122
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				123
-#define GCC_TSIF_REF_CLK					124
-#define GCC_TSIF_REF_CLK_SRC					125
-#define GCC_UFS_CARD_AHB_CLK					126
-#define GCC_UFS_CARD_AXI_CLK					127
-#define GCC_UFS_CARD_AXI_CLK_SRC				128
-#define GCC_UFS_CARD_CLKREF_CLK					129
-#define GCC_UFS_CARD_ICE_CORE_CLK				130
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				131
-#define GCC_UFS_CARD_PHY_AUX_CLK				132
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				133
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				134
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				135
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				136
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				137
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			138
-#define GCC_UFS_MEM_CLKREF_CLK					139
-#define GCC_UFS_PHY_AHB_CLK					140
-#define GCC_UFS_PHY_AXI_CLK					141
-#define GCC_UFS_PHY_AXI_CLK_SRC					142
-#define GCC_UFS_PHY_ICE_CORE_CLK				143
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				144
-#define GCC_UFS_PHY_PHY_AUX_CLK					145
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				146
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				147
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				148
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				149
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				150
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				151
-#define GCC_USB30_PRIM_MASTER_CLK				152
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				153
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				154
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			155
-#define GCC_USB30_PRIM_SLEEP_CLK				156
-#define GCC_USB30_SEC_MASTER_CLK				157
-#define GCC_USB30_SEC_MASTER_CLK_SRC				158
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				159
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				160
-#define GCC_USB30_SEC_SLEEP_CLK					161
-#define GCC_USB3_PRIM_CLKREF_CLK				162
-#define GCC_USB3_PRIM_PHY_AUX_CLK				163
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				164
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				165
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				166
-#define GCC_USB3_SEC_CLKREF_CLK					167
-#define GCC_USB3_SEC_PHY_AUX_CLK				168
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				169
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				170
-#define GCC_USB3_SEC_PHY_PIPE_CLK				171
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				172
-#define GCC_VIDEO_AHB_CLK					173
-#define GCC_VIDEO_AXI_CLK					174
-#define GCC_VIDEO_XO_CLK					175
-#define GPLL0							176
-#define GPLL0_OUT_EVEN						177
-#define GPLL0_OUT_MAIN						178
-#define GPLL1							179
-#define GPLL1_OUT_MAIN						180
+#define GCC_QUPV3_WRAP0_S0_CLK					71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				72
+#define GCC_QUPV3_WRAP0_S1_CLK					73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				74
+#define GCC_QUPV3_WRAP0_S2_CLK					75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				76
+#define GCC_QUPV3_WRAP0_S3_CLK					77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				78
+#define GCC_QUPV3_WRAP0_S4_CLK					79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				80
+#define GCC_QUPV3_WRAP0_S5_CLK					81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				82
+#define GCC_QUPV3_WRAP0_S6_CLK					83
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				84
+#define GCC_QUPV3_WRAP0_S7_CLK					85
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				86
+#define GCC_QUPV3_WRAP1_S0_CLK					87
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				88
+#define GCC_QUPV3_WRAP1_S1_CLK					89
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				90
+#define GCC_QUPV3_WRAP1_S2_CLK					91
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				92
+#define GCC_QUPV3_WRAP1_S3_CLK					93
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				94
+#define GCC_QUPV3_WRAP1_S4_CLK					95
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				96
+#define GCC_QUPV3_WRAP1_S5_CLK					97
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				98
+#define GCC_QUPV3_WRAP1_S6_CLK					99
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				100
+#define GCC_QUPV3_WRAP1_S7_CLK					101
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				102
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				103
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				104
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				105
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				106
+#define GCC_RX1_USB2_CLKREF_CLK					107
+#define GCC_RX2_QLINK_CLKREF_CLK				108
+#define GCC_RX3_MODEM_CLKREF_CLK				109
+#define GCC_SDCC2_AHB_CLK					110
+#define GCC_SDCC2_APPS_CLK					111
+#define GCC_SDCC2_APPS_CLK_SRC					112
+#define GCC_SDCC4_AHB_CLK					113
+#define GCC_SDCC4_APPS_CLK					114
+#define GCC_SDCC4_APPS_CLK_SRC					115
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				116
+#define GCC_TSIF_AHB_CLK					117
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				118
+#define GCC_TSIF_REF_CLK					119
+#define GCC_TSIF_REF_CLK_SRC					120
+#define GCC_UFS_CARD_AHB_CLK					121
+#define GCC_UFS_CARD_AXI_CLK					122
+#define GCC_UFS_CARD_AXI_CLK_SRC				123
+#define GCC_UFS_CARD_CLKREF_CLK					124
+#define GCC_UFS_CARD_ICE_CORE_CLK				125
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				126
+#define GCC_UFS_CARD_PHY_AUX_CLK				127
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				128
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				129
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				130
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				131
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				132
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			133
+#define GCC_UFS_MEM_CLKREF_CLK					134
+#define GCC_UFS_PHY_AHB_CLK					135
+#define GCC_UFS_PHY_AXI_CLK					136
+#define GCC_UFS_PHY_AXI_CLK_SRC					137
+#define GCC_UFS_PHY_ICE_CORE_CLK				138
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				139
+#define GCC_UFS_PHY_PHY_AUX_CLK					140
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				141
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				142
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				143
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				144
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				145
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				146
+#define GCC_USB30_PRIM_MASTER_CLK				147
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				148
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				149
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			150
+#define GCC_USB30_PRIM_SLEEP_CLK				151
+#define GCC_USB30_SEC_MASTER_CLK				152
+#define GCC_USB30_SEC_MASTER_CLK_SRC				153
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				154
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				155
+#define GCC_USB30_SEC_SLEEP_CLK					156
+#define GCC_USB3_PRIM_CLKREF_CLK				157
+#define GCC_USB3_PRIM_PHY_AUX_CLK				158
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				159
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				160
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				161
+#define GCC_USB3_SEC_CLKREF_CLK					162
+#define GCC_USB3_SEC_PHY_AUX_CLK				163
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				164
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				165
+#define GCC_USB3_SEC_PHY_PIPE_CLK				166
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				167
+#define GCC_VIDEO_AHB_CLK					168
+#define GCC_VIDEO_AXI_CLK					169
+#define GCC_VIDEO_XO_CLK					170
+#define GPLL0							171
+#define GPLL0_OUT_EVEN						172
+#define GPLL0_OUT_MAIN						173
+#define GPLL1							174
+#define GPLL1_OUT_MAIN						175
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
@@ -224,4 +219,10 @@
 #define GCC_USB3_DP_PHY_SEC_BCR					23
 #define GCC_USB_PHY_CFG_AHB2PHY_BCR				24
 
+/* Dummy clocks for rate measurement */
+#define MEASURE_ONLY_SNOC_CLK					0
+#define MEASURE_ONLY_CNOC_CLK					1
+#define MEASURE_ONLY_BIMC_CLK					2
+#define MEASURE_ONLY_IPA_2X_CLK					3
+
 #endif
diff --git a/include/dt-bindings/msm/power-on.h b/include/dt-bindings/msm/power-on.h
new file mode 100644
index 0000000..f43841e
--- /dev/null
+++ b/include/dt-bindings/msm/power-on.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MSM_POWER_ON_H__
+#define __MSM_POWER_ON_H__
+
+#define PON_POWER_OFF_RESERVED		0x00
+#define PON_POWER_OFF_WARM_RESET	0x01
+#define PON_POWER_OFF_SHUTDOWN		0x04
+#define PON_POWER_OFF_DVDD_SHUTDOWN	0x05
+#define PON_POWER_OFF_HARD_RESET	0x07
+#define PON_POWER_OFF_DVDD_HARD_RESET	0x08
+#define PON_POWER_OFF_MAX_TYPE		0x10
+
+#endif /* __MSM_POWER_ON_H__ */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 22fd849..b871c0c 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -65,12 +65,6 @@
 #define EXTCON_JACK_SPDIF_IN	26	/* Sony Philips Digital InterFace */
 #define EXTCON_JACK_SPDIF_OUT	27
 
-/* connector orientation 0 - CC1, 1 - CC2 */
-#define EXTCON_USB_CC		28
-
-/* connector speed 0 - High Speed, 1 - super speed */
-#define EXTCON_USB_SPEED	29
-
 /* Display external connector */
 #define EXTCON_DISP_HDMI	40	/* High-Definition Multimedia Interface */
 #define EXTCON_DISP_MHL		41	/* Mobile High-Definition Link */
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 01c0b9c..8c58db2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -162,8 +162,8 @@
 		    int len, void *val);
 int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
 			    int len, struct kvm_io_device *dev);
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			      struct kvm_io_device *dev);
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			       struct kvm_io_device *dev);
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
 					 gpa_t addr);
 
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 2546988..8b35bdb 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -739,6 +739,12 @@
 	return false;
 }
 
+static inline void mem_cgroup_update_page_stat(struct page *page,
+					       enum mem_cgroup_stat_index idx,
+					       int nr)
+{
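+	/* Intentionally empty: page-stat accounting is stubbed out here. */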
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
 					    enum mem_cgroup_stat_index idx)
 {
diff --git a/include/linux/msm_adreno_devfreq.h b/include/linux/msm_adreno_devfreq.h
index 1e580d3..2b94289 100644
--- a/include/linux/msm_adreno_devfreq.h
+++ b/include/linux/msm_adreno_devfreq.h
@@ -21,6 +21,10 @@
 #define ADRENO_DEVFREQ_NOTIFY_RETIRE	2
 #define ADRENO_DEVFREQ_NOTIFY_IDLE	3
 
+#define DEVFREQ_FLAG_WAKEUP_MAXFREQ	0x2
+#define DEVFREQ_FLAG_FAST_HINT		0x4
+#define DEVFREQ_FLAG_SLOW_HINT		0x8
+
 struct device;
 
 int kgsl_devfreq_add_notifier(struct device *device,
diff --git a/include/linux/platform_data/qcom_crypto_device.h b/include/linux/platform_data/qcom_crypto_device.h
new file mode 100644
index 0000000..eadaa42
--- /dev/null
+++ b/include/linux/platform_data/qcom_crypto_device.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CRYPTO_DEVICE__H
+#define __QCOM_CRYPTO_DEVICE__H
+
+struct msm_ce_hw_support {
+	uint32_t ce_shared;
+	uint32_t shared_ce_resource;
+	uint32_t hw_key_support;
+	uint32_t sha_hmac;
+	void *bus_scale_table;
+};
+
+#endif /* __QCOM_CRYPTO_DEVICE__H */
diff --git a/drivers/power/supply/qcom/pmic-voter.h b/include/linux/pmic-voter.h
similarity index 90%
rename from drivers/power/supply/qcom/pmic-voter.h
rename to include/linux/pmic-voter.h
index 031b9a0..f202bf7 100644
--- a/drivers/power/supply/qcom/pmic-voter.h
+++ b/include/linux/pmic-voter.h
@@ -24,6 +24,9 @@
 	NUM_VOTABLE_TYPES,
 };
 
+bool is_client_vote_enabled(struct votable *votable, const char *client_str);
+bool is_client_vote_enabled_locked(struct votable *votable,
+							const char *client_str);
 int get_client_vote(struct votable *votable, const char *client_str);
 int get_client_vote_locked(struct votable *votable, const char *client_str);
 int get_effective_result(struct votable *votable);
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b46d6a8..77912a1 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -217,6 +217,7 @@
 	POWER_SUPPLY_PROP_DP_DM,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
 	POWER_SUPPLY_PROP_CURRENT_QNOVO,
 	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
 	POWER_SUPPLY_PROP_RERUN_AICL,
@@ -245,6 +246,7 @@
 	POWER_SUPPLY_PROP_DIE_HEALTH,
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index e1ad51e..0de4da6 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -248,6 +248,17 @@
 #define RX_DMA_IRQ_DELAY_MSK	(GENMASK(8, 6))
 #define RX_DMA_IRQ_DELAY_SHFT	(6)
 
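+/*
+ * The *_nolog variants below use the no-log accessors which, on MSM
+ * kernels, skip the register-access trace buffer.
+ */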
+static inline unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
+{
+	return readl_relaxed_no_log(base + offset);
+}
+
+static inline void geni_write_reg_nolog(unsigned int value, void __iomem *base,
+				int offset)
+{
+	writel_relaxed_no_log(value, base + offset);
+}
+
 static inline unsigned int geni_read_reg(void __iomem *base, int offset)
 {
 	return readl_relaxed(base + offset);
diff --git a/include/linux/qcrypto.h b/include/linux/qcrypto.h
new file mode 100644
index 0000000..252464a
--- /dev/null
+++ b/include/linux/qcrypto.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+
+#define QCRYPTO_CTX_KEY_MASK		0x000000ff
+#define QCRYPTO_CTX_USE_HW_KEY		0x00000001
+#define QCRYPTO_CTX_USE_PIPE_KEY	0x00000002
+
+#define QCRYPTO_CTX_XTS_MASK		0x0000ff00
+#define QCRYPTO_CTX_XTS_DU_SIZE_512B	0x00000100
+#define QCRYPTO_CTX_XTS_DU_SIZE_1KB	0x00000200
+
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev);
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
+/*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+							unsigned int flags);
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);*/
+
+struct crypto_engine_entry {
+	u32 hw_instance;
+	u32 ce_device;
+	int shared;
+};
+
+int qcrypto_get_num_engines(void);
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr);
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
+				unsigned int fde_pfe,
+				unsigned int hw_inst);
+
+
+struct qcrypto_func_set {
+	int (*cipher_set)(struct ablkcipher_request *req,
+			unsigned int fde_pfe,
+			unsigned int hw_inst);
+	int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
+	int (*get_num_engines)(void);
+	void (*get_engine_list)(size_t num_engines,
+				struct crypto_engine_entry *arr);
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index 263f20a..ffb6393 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -45,6 +45,7 @@
 	USB_PHY_TYPE_UNDEFINED,
 	USB_PHY_TYPE_USB2,
 	USB_PHY_TYPE_USB3,
+	USB_PHY_TYPE_USB3_DP,
 };
 
 /* OTG defines lots of enumeration states before device reset */
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
new file mode 100644
index 0000000..0efea04
--- /dev/null
+++ b/include/soc/qcom/qseecomi.h
@@ -0,0 +1,729 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOMI_H_
+#define __QSEECOMI_H_
+
+#include <linux/qseecom.h>
+
+#define QSEECOM_KEY_ID_SIZE   32
+
+#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD  -19   /*0xFFFFFFED*/
+#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
+#define QSEOS_RESULT_FAIL_KS_OP               -64
+#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS       -65
+#define QSEOS_RESULT_FAIL_MAX_KEYS            -66
+#define QSEOS_RESULT_FAIL_SAVE_KS             -67
+#define QSEOS_RESULT_FAIL_LOAD_KS             -68
+#define QSEOS_RESULT_FAIL_KS_ALREADY_DONE     -69
+#define QSEOS_RESULT_FAIL_KEY_ID_DNE          -70
+#define QSEOS_RESULT_FAIL_INCORRECT_PSWD      -71
+#define QSEOS_RESULT_FAIL_MAX_ATTEMPT         -72
+#define QSEOS_RESULT_FAIL_PENDING_OPERATION   -73
+
+enum qseecom_command_scm_resp_type {
+	QSEOS_APP_ID = 0xEE01,
+	QSEOS_LISTENER_ID
+};
+
+enum qseecom_qceos_cmd_id {
+	QSEOS_APP_START_COMMAND      = 0x01,
+	QSEOS_APP_SHUTDOWN_COMMAND,
+	QSEOS_APP_LOOKUP_COMMAND,
+	QSEOS_REGISTER_LISTENER,
+	QSEOS_DEREGISTER_LISTENER,
+	QSEOS_CLIENT_SEND_DATA_COMMAND,
+	QSEOS_LISTENER_DATA_RSP_COMMAND,
+	QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_GET_APP_STATE_COMMAND,
+	QSEOS_LOAD_SERV_IMAGE_COMMAND,
+	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+	QSEOS_APP_REGION_NOTIFICATION,
+	QSEOS_REGISTER_LOG_BUF_COMMAND,
+	QSEOS_RPMB_PROVISION_KEY_COMMAND,
+	QSEOS_RPMB_ERASE_COMMAND,
+	QSEOS_GENERATE_KEY  = 0x11,
+	QSEOS_DELETE_KEY,
+	QSEOS_MAX_KEY_COUNT,
+	QSEOS_SET_KEY,
+	QSEOS_UPDATE_KEY_USERINFO,
+	QSEOS_TEE_OPEN_SESSION,
+	QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_INVOKE_MODFD_COMMAND = QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_CLOSE_SESSION,
+	QSEOS_TEE_REQUEST_CANCELLATION,
+	QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
+	QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
+	QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
+	QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
+	QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
+	QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
+	QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
+	QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
+	QSEOS_FSM_IKE_REQ_CMD = 0x203,
+	QSEOS_FSM_IKE_REQ_RSP_CMD = 0x204,
+	QSEOS_FSM_OEM_FUSE_WRITE_ROW = 0x301,
+	QSEOS_FSM_OEM_FUSE_READ_ROW = 0x302,
+	QSEOS_FSM_ENCFS_REQ_CMD = 0x403,
+	QSEOS_FSM_ENCFS_REQ_RSP_CMD = 0x404,
+
+	QSEOS_CMD_MAX     = 0xEFFFFFFF
+};
+
+enum qseecom_qceos_cmd_status {
+	QSEOS_RESULT_SUCCESS = 0,
+	QSEOS_RESULT_INCOMPLETE,
+	QSEOS_RESULT_BLOCKED_ON_LISTENER,
+	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
+};
+
+enum qseecom_pipe_type {
+	QSEOS_PIPE_ENC = 0x1,
+	QSEOS_PIPE_ENC_XTS = 0x2,
+	QSEOS_PIPE_AUTH = 0x4,
+	QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
+};
+
+/* QSEE Reentrancy support phase */
+enum qseecom_qsee_reentrancy_phase {
+	QSEE_REENTRANCY_PHASE_0 = 0,
+	QSEE_REENTRANCY_PHASE_1,
+	QSEE_REENTRANCY_PHASE_2,
+	QSEE_REENTRANCY_PHASE_3,
+	QSEE_REENTRANCY_PHASE_MAX = 0xFF
+};
+
+__packed  struct qsee_apps_region_info_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t addr;
+	uint32_t size;
+};
+
+__packed  struct qsee_apps_region_info_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t addr;
+	uint32_t size;
+};
+
+__packed struct qseecom_check_app_ireq {
+	uint32_t qsee_cmd_id;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_load_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;		/* Length of the mdt file */
+	uint32_t img_len;		/* Length of .bxx and .mdt files */
+	uint32_t phy_addr;		/* phy addr of the start of image */
+	char     app_name[MAX_APP_NAME_SIZE];	/* application name*/
+};
+
+__packed struct qseecom_load_app_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_unload_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  app_id;
+};
+
+__packed struct qseecom_load_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint32_t phy_addr;
+};
+
+__packed struct qseecom_load_lib_image_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+};
+
+__packed struct qseecom_unload_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+};
+
+__packed struct qseecom_register_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t sb_ptr;
+	uint32_t sb_len;
+};
+
+__packed struct qseecom_register_listener_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint64_t sb_ptr;
+	uint32_t sb_len;
+};
+
+__packed struct qseecom_unregister_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  listener_id;
+};
+
+__packed struct qseecom_client_send_data_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr; /* First 4 bytes should be the return status */
+	uint32_t rsp_len;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_data_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint64_t req_ptr;
+	uint32_t req_len;
+	uint64_t rsp_ptr;
+	uint32_t rsp_len;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_reg_log_buf_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t phy_addr;
+	uint32_t len;
+};
+
+__packed struct qseecom_reg_log_buf_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t phy_addr;
+	uint32_t len;
+};
+
+/* send_data resp */
+__packed struct qseecom_client_listener_data_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_listener_data_64bit_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+/*
+ * struct qseecom_command_scm_resp - qseecom response buffer
+ * @result:    status of the command, from enum qseecom_qceos_cmd_status
+ * @resp_type: type of the response payload
+ * @data:      response payload, e.g. the app ID or listener ID
+ */
+__packed struct qseecom_command_scm_resp {
+	uint32_t result;
+	enum qseecom_command_scm_resp_type resp_type;
+	unsigned int data;
+};
+
+struct qseecom_rpmb_provision_key {
+	uint32_t key_type;
+};
+
+__packed struct qseecom_client_send_service_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type; /* in */
+	unsigned int req_len; /* in */
+	uint32_t rsp_ptr; /* in/out */
+	unsigned int rsp_len; /* in/out */
+};
+
+__packed struct qseecom_client_send_service_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type;
+	unsigned int req_len;
+	uint64_t rsp_ptr;
+	unsigned int rsp_len;
+};
+
+__packed struct qseecom_key_generate_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_select_ireq {
+	uint32_t qsee_command_id;
+	uint32_t ce;
+	uint32_t pipe;
+	uint32_t pipe_type;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_delete_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_userinfo_update_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t current_hash32[QSEECOM_HASH_SIZE];
+	uint8_t new_hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_max_count_query_ireq {
+	uint32_t flags;
+};
+
+__packed struct qseecom_key_max_count_query_irsp {
+	uint32_t max_key_count;
+};
+
+__packed struct qseecom_qteec_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint32_t    req_ptr;
+	uint32_t    req_len;
+	uint32_t    resp_ptr;
+	uint32_t    resp_len;
+	uint32_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+};
+
+__packed struct qseecom_qteec_64bit_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint64_t    req_ptr;
+	uint32_t    req_len;
+	uint64_t    resp_ptr;
+	uint32_t    resp_len;
+	uint64_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_fsm_key_req {
+	uint32_t qsee_cmd_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;
+	uint32_t rsp_len;
+};
+
+__packed struct qseecom_continue_blocked_request_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+};
+
+
+/**********      ARMV8 SMC INTERFACE TZ MACRO     *******************/
+
+#define TZ_SVC_APP_MGR                   1     /* Application management */
+#define TZ_SVC_LISTENER                  2     /* Listener service management */
+#define TZ_SVC_EXTERNAL                  3     /* External image loading */
+#define TZ_SVC_RPMB                      4     /* RPMB */
+#define TZ_SVC_KEYSTORE                  5     /* Keystore management */
+#define TZ_SVC_ES                        16    /* Enterprise Security */
+#define TZ_SVC_MDTP                      18    /* Mobile Device Theft */
+
+/*----------------------------------------------------------------------------
+ * Owning Entity IDs (defined by ARM SMC doc)
+ * ---------------------------------------------------------------------------
+ */
+#define TZ_OWNER_ARM                     0     /** ARM Architecture call ID */
+#define TZ_OWNER_CPU                     1     /** CPU service call ID */
+#define TZ_OWNER_SIP                     2     /** SIP service call ID */
+#define TZ_OWNER_OEM                     3     /** OEM service call ID */
+#define TZ_OWNER_STD                     4     /** Standard service call ID */
+
+/** Values 5-47 are reserved for future use */
+
+/** Trusted Application call IDs */
+#define TZ_OWNER_TZ_APPS                 48
+#define TZ_OWNER_TZ_APPS_RESERVED        49
+/** Trusted OS Call IDs */
+#define TZ_OWNER_QSEE_OS                 50
+#define TZ_OWNER_MOBI_OS                 51
+#define TZ_OWNER_OS_RESERVED_3           52
+#define TZ_OWNER_OS_RESERVED_4           53
+#define TZ_OWNER_OS_RESERVED_5           54
+#define TZ_OWNER_OS_RESERVED_6           55
+#define TZ_OWNER_OS_RESERVED_7           56
+#define TZ_OWNER_OS_RESERVED_8           57
+#define TZ_OWNER_OS_RESERVED_9           58
+#define TZ_OWNER_OS_RESERVED_10          59
+#define TZ_OWNER_OS_RESERVED_11          60
+#define TZ_OWNER_OS_RESERVED_12          61
+#define TZ_OWNER_OS_RESERVED_13          62
+#define TZ_OWNER_OS_RESERVED_14          63
+
+#define TZ_SVC_INFO                      6    /* Misc. information services */
+
+/** Trusted Application call groups */
+#define TZ_SVC_APP_ID_PLACEHOLDER        0    /* SVC bits will contain App ID */
+
+/** General helper macro to create a bitmask from bits low to high. */
+#define TZ_MASK_BITS(h, l)     ((0xffffffff >> (32 - ((h - l) + 1))) << l)
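+/* e.g. TZ_MASK_BITS(29, 24) == 0x3f000000, the owner field of an SMC ID. */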
+
+/*
+ * Macro used to define an SMC ID based on the owner ID,
+ * service ID, and function number.
+ */
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
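+/*
+ * Worked example, using values defined below: TZ_OS_APP_START_ID is
+ * TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+ * = ((50 & 0x3f) << 24) | ((1 & 0xff) << 8) | (0x01 & 0xff) = 0x32000101.
+ */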
+
+#define TZ_SYSCALL_PARAM_NARGS_MASK  TZ_MASK_BITS(3, 0)
+#define TZ_SYSCALL_PARAM_TYPE_MASK   TZ_MASK_BITS(1, 0)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID(nargs, p1, p2, p3, \
+	p4, p5, p6, p7, p8, p9, p10) \
+	((nargs&TZ_SYSCALL_PARAM_NARGS_MASK)+ \
+	((p1&TZ_SYSCALL_PARAM_TYPE_MASK)<<4)+ \
+	((p2&TZ_SYSCALL_PARAM_TYPE_MASK)<<6)+ \
+	((p3&TZ_SYSCALL_PARAM_TYPE_MASK)<<8)+ \
+	((p4&TZ_SYSCALL_PARAM_TYPE_MASK)<<10)+ \
+	((p5&TZ_SYSCALL_PARAM_TYPE_MASK)<<12)+ \
+	((p6&TZ_SYSCALL_PARAM_TYPE_MASK)<<14)+ \
+	((p7&TZ_SYSCALL_PARAM_TYPE_MASK)<<16)+ \
+	((p8&TZ_SYSCALL_PARAM_TYPE_MASK)<<18)+ \
+	((p9&TZ_SYSCALL_PARAM_TYPE_MASK)<<20)+ \
+	((p10&TZ_SYSCALL_PARAM_TYPE_MASK)<<22))
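+/*
+ * Layout: the argument count occupies bits [3:0]; each parameter type
+ * then takes two bits, starting at bit 4 for p1.
+ */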
+
+/*
+ * Macros used to create the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+#define TZ_SYSCALL_CREATE_PARAM_ID_1(p1) \
+	TZ_SYSCALL_CREATE_PARAM_ID(1, p1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_2(p1, p2) \
+	TZ_SYSCALL_CREATE_PARAM_ID(2, p1, p2, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_3(p1, p2, p3) \
+	TZ_SYSCALL_CREATE_PARAM_ID(3, p1, p2, p3, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_4(p1, p2, p3, p4) \
+	TZ_SYSCALL_CREATE_PARAM_ID(4, p1, p2, p3, p4, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_5(p1, p2, p3, p4, p5) \
+	TZ_SYSCALL_CREATE_PARAM_ID(5, p1, p2, p3, p4, p5, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_6(p1, p2, p3, p4, p5, p6) \
+	TZ_SYSCALL_CREATE_PARAM_ID(6, p1, p2, p3, p4, p5, p6, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_7(p1, p2, p3, p4, p5, p6, p7) \
+	TZ_SYSCALL_CREATE_PARAM_ID(7, p1, p2, p3, p4, p5, p6, p7, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_8(p1, p2, p3, p4, p5, p6, p7, p8) \
+	TZ_SYSCALL_CREATE_PARAM_ID(8, p1, p2, p3, p4, p5, p6, p7, p8, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+	TZ_SYSCALL_CREATE_PARAM_ID(9, p1, p2, p3, p4, p5, p6, p7, p8, p9, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) \
+	TZ_SYSCALL_CREATE_PARAM_ID(10, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
+
+/*
+ * Macro used to obtain the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_GET_PARAM_ID(CMD_ID)        CMD_ID ## _PARAM_ID
+
+/** Helper macro to extract the owning entity from the SMC ID. */
+#define TZ_SYSCALL_OWNER_ID(r0)   ((r0 & TZ_MASK_BITS(29, 24)) >> 24)
+
+/** Helper macro for checking whether an owning entity is of type trusted OS. */
+#define IS_OWNER_TRUSTED_OS(owner_id) \
+			(((owner_id >= 50) && (owner_id <= 63)) ? 1:0)
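+/* i.e. owner IDs TZ_OWNER_QSEE_OS (50) through TZ_OWNER_OS_RESERVED_14 (63). */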
+
+#define TZ_SYSCALL_PARAM_TYPE_VAL              0x0     /* type of value */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RO           0x1     /* type of buffer RO */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RW           0x2     /* type of buffer RW */
+
+#define TZ_OS_APP_START_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+
+#define TZ_OS_APP_START_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_SHUTDOWN_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x02)
+
+#define TZ_OS_APP_SHUTDOWN_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_LOOKUP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x03)
+
+#define TZ_OS_APP_LOOKUP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_GET_STATE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x04)
+
+#define TZ_OS_APP_GET_STATE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x05)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x06)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x07)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x08)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_REGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x01)
+
+#define TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x06)
+
+#define TZ_OS_REGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x02)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x03)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x01)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x01)
+
+
+#define TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x02)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x01)
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x03)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x01)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_ERASE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x02)
+
+#define TZ_OS_RPMB_ERASE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x03)
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_GEN_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01)
+
+#define TZ_OS_KS_GEN_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_DEL_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x02)
+
+#define TZ_OS_KS_DEL_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x03)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x04)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_UPDATE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x05)
+
+#define TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, 0x01)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x02)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x03)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x04)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID				\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x05)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID			\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_MDTP_CIPHER_DIP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_MDTP, 0x1)
+
+#define TZ_MDTP_CIPHER_DIP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RO, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_7( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_4( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#endif /* __QSEECOMI_H_ */
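
These SMC IDs and their PARAM_IDs are consumed in pairs: the SMC ID selects
the TZ entry point and the PARAM_ID encodes the argument layout that owner
expects. A minimal sketch of how a caller might wire the listener pair
together, assuming the scm_call2()/struct scm_desc interface from
include/soc/qcom/scm.h (the shared-buffer arguments are placeholders):

	#include <soc/qcom/scm.h>

	/* Hypothetical caller: register a QSEE listener service. */
	static int example_register_listener(u32 listener_id,
					     phys_addr_t sb_phys, u32 sb_len)
	{
		struct scm_desc desc = {0};

		desc.arginfo = TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
		desc.args[0] = listener_id;	/* VAL */
		desc.args[1] = sb_phys;		/* BUF_RW: shared buffer */
		desc.args[2] = sb_len;		/* VAL: buffer length */

		return scm_call2(TZ_OS_REGISTER_LISTENER_ID, &desc);
	}
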
diff --git a/include/uapi/linux/fips_status.h b/include/uapi/linux/fips_status.h
index d7cda94..7daf27b 100644
--- a/include/uapi/linux/fips_status.h
+++ b/include/uapi/linux/fips_status.h
@@ -5,24 +5,24 @@
 #include <linux/ioctl.h>
 
 /**
- * fips_status: global FIPS140-2 status
- * @FIPS140_STATUS_NA:
- *					Not a FIPS140-2 compliant Build.
- *					The flag status won't
- *					change throughout
- *					the lifetime
- * @FIPS140_STATUS_PASS_CRYPTO:
- *					KAT self tests are passed.
- * @FIPS140_STATUS_QCRYPTO_ALLOWED:
- *					Integrity test is passed.
- * @FIPS140_STATUS_PASS:
- *					All tests are passed and build
- *					is in FIPS140-2 mode
- * @FIPS140_STATUS_FAIL:
- *					One of the test is failed.
- *					This will block all requests
- *					to crypto modules
- */
+* fips_status: global FIPS140-2 status
+* @FIPS140_STATUS_NA:
+*					Not a FIPS140-2 compliant build.
+*					The flag status won't
+*					change throughout
+*					the lifetime
+* @FIPS140_STATUS_PASS_CRYPTO:
+*					KAT self-tests have passed.
+* @FIPS140_STATUS_QCRYPTO_ALLOWED:
+*					Integrity test has passed.
+* @FIPS140_STATUS_PASS:
+*					All tests have passed and the build
+*					is in FIPS140-2 mode
+* @FIPS140_STATUS_FAIL:
+*					One of the tests has failed.
+*					This will block all requests
+*					to crypto modules
+*/
 enum fips_status {
 		FIPS140_STATUS_NA				= 0,
 		FIPS140_STATUS_PASS_CRYPTO		= 1,
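
The enum above is the whole userspace contract for FIPS state; per the
kernel-doc, FIPS140_STATUS_FAIL is the only state that blocks requests to
the crypto modules. A hedged sketch of a consumer-side gate (the helper is
illustrative, not part of this uapi header):

	#include <linux/fips_status.h>

	/* Illustrative policy check, not defined by the header itself. */
	static inline int example_crypto_requests_allowed(enum fips_status st)
	{
		return st != FIPS140_STATUS_FAIL;
	}
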
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 1e6ccf4..817feba 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -161,6 +161,7 @@
 	IPA_CLIENT_Q6_DECOMP_PROD,
 	IPA_CLIENT_Q6_DECOMP2_PROD,
 	IPA_CLIENT_UC_USB_PROD,
+	IPA_CLIENT_ETHERNET_PROD,
 
 	/* Below PROD client type is only for test purpose */
 	IPA_CLIENT_TEST_PROD,
@@ -200,6 +201,8 @@
 	IPA_CLIENT_Q6_DECOMP_CONS,
 	IPA_CLIENT_Q6_DECOMP2_CONS,
 	IPA_CLIENT_Q6_LTE_WIFI_AGGR_CONS,
+	IPA_CLIENT_ETHERNET_CONS,
+
 	/* Below CONS client type is only for test purpose */
 	IPA_CLIENT_TEST_CONS,
 	IPA_CLIENT_TEST1_CONS,
@@ -417,6 +420,7 @@
 	IPA_RM_RESOURCE_WLAN_PROD,
 	IPA_RM_RESOURCE_ODU_ADAPT_PROD,
 	IPA_RM_RESOURCE_MHI_PROD,
+	IPA_RM_RESOURCE_ETHERNET_PROD,
 	IPA_RM_RESOURCE_PROD_MAX,
 
 	IPA_RM_RESOURCE_Q6_CONS = IPA_RM_RESOURCE_PROD_MAX,
@@ -427,6 +431,7 @@
 	IPA_RM_RESOURCE_APPS_CONS,
 	IPA_RM_RESOURCE_ODU_ADAPT_CONS,
 	IPA_RM_RESOURCE_MHI_CONS,
+	IPA_RM_RESOURCE_ETHERNET_CONS,
 	IPA_RM_RESOURCE_MAX
 };
 
diff --git a/include/uapi/linux/qcedev.h b/include/uapi/linux/qcedev.h
index 6fee15d..fb51c23 100644
--- a/include/uapi/linux/qcedev.h
+++ b/include/uapi/linux/qcedev.h
@@ -20,14 +20,14 @@
 #define QCEDEV_AES_KEY_192	24
 #define QCEDEV_AES_KEY_256	32
 /**
- *qcedev_oper_enum: Operation types
- * @QCEDEV_OPER_ENC:		Encrypt
- * @QCEDEV_OPER_DEC:		Decrypt
- * @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need key to be specified by
- *				user. Key already set by an external processor.
- * @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
- *				user. Key already set by an external processor.
- */
+*qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_ENC:		Encrypt
+* @QCEDEV_OPER_DEC:		Decrypt
+* @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need the key to be specified by
+*				user. Key already set by an external processor.
+* @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
+*				user. Key already set by an external processor.
+*/
 enum qcedev_oper_enum {
 	QCEDEV_OPER_DEC		= 0,
 	QCEDEV_OPER_ENC		= 1,
@@ -37,11 +37,11 @@
 };
 
 /**
- *qcedev_oper_enum: Cipher algorithm types
- * @QCEDEV_ALG_DES:		DES
- * @QCEDEV_ALG_3DES:		3DES
- * @QCEDEV_ALG_AES:		AES
- */
+*qcedev_oper_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES:		DES
+* @QCEDEV_ALG_3DES:		3DES
+* @QCEDEV_ALG_AES:		AES
+*/
 enum qcedev_cipher_alg_enum {
 	QCEDEV_ALG_DES		= 0,
 	QCEDEV_ALG_3DES		= 1,
@@ -50,15 +50,15 @@
 };
 
 /**
- *qcedev_cipher_mode_enum : AES mode
- * @QCEDEV_AES_MODE_CBC:		CBC
- * @QCEDEV_AES_MODE_ECB:		ECB
- * @QCEDEV_AES_MODE_CTR:		CTR
- * @QCEDEV_AES_MODE_XTS:		XTS
- * @QCEDEV_AES_MODE_CCM:		CCM
- * @QCEDEV_DES_MODE_CBC:		CBC
- * @QCEDEV_DES_MODE_ECB:		ECB
- */
+*qcedev_cipher_mode_enum : AES mode
+* @QCEDEV_AES_MODE_CBC:		CBC
+* @QCEDEV_AES_MODE_ECB:		ECB
+* @QCEDEV_AES_MODE_CTR:		CTR
+* @QCEDEV_AES_MODE_XTS:		XTS
+* @QCEDEV_AES_MODE_CCM:		CCM
+* @QCEDEV_DES_MODE_CBC:		CBC
+* @QCEDEV_DES_MODE_ECB:		ECB
+*/
 enum qcedev_cipher_mode_enum {
 	QCEDEV_AES_MODE_CBC	= 0,
 	QCEDEV_AES_MODE_ECB	= 1,
@@ -71,13 +71,13 @@
 };
 
 /**
- *enum qcedev_sha_alg_enum : Secure Hashing Algorithm
- * @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
- * @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bit)
- * @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
- * @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bit)
- * @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
- */
+*enum qcedev_sha_alg_enum : Secure Hashing Algorithm
+* @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bits)
+* @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bits)
+* @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
+*/
 enum qcedev_sha_alg_enum {
 	QCEDEV_ALG_SHA1		= 0,
 	QCEDEV_ALG_SHA256	= 1,
@@ -88,12 +88,12 @@
 };
 
 /**
- * struct buf_info - Buffer information
- * @offset:			Offset from the base address of the buffer
- *				(Used when buffer is allocated using PMEM)
- * @vaddr:			Virtual buffer address pointer
- * @len:				Size of the buffer
- */
+* struct buf_info - Buffer information
+* @offset:			Offset from the base address of the buffer
+*				(Used when buffer is allocated using PMEM)
+* @vaddr:			Virtual buffer address pointer
+* @len:				Size of the buffer
+*/
 struct	buf_info {
 	union {
 		uint32_t	offset;
@@ -103,26 +103,26 @@
 };
 
 /**
- * struct qcedev_vbuf_info - Source and destination Buffer information
- * @src:				Array of buf_info for input/source
- * @dst:				Array of buf_info for output/destination
- */
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src:				Array of buf_info for input/source
+* @dst:				Array of buf_info for output/destination
+*/
 struct	qcedev_vbuf_info {
 	struct buf_info	src[QCEDEV_MAX_BUFFERS];
 	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
 };
 
 /**
- * struct qcedev_pmem_info - Stores PMEM buffer information
- * @fd_src:			Handle to /dev/adsp_pmem used to allocate
- *				memory for input/src buffer
- * @src:				Array of buf_info for input/source
- * @fd_dst:			Handle to /dev/adsp_pmem used to allocate
- *				memory for output/dst buffer
- * @dst:				Array of buf_info for output/destination
- * @pmem_src_offset:		The offset from input/src buffer
- *				(allocated by PMEM)
- */
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src:			Handle to /dev/adsp_pmem used to allocate
+*				memory for input/src buffer
+* @src:				Array of buf_info for input/source
+* @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+*				memory for output/dst buffer
+* @dst:				Array of buf_info for output/destination
+* @pmem_src_offset:		The offset from input/src buffer
+*				(allocated by PMEM)
+*/
 struct	qcedev_pmem_info {
 	int		fd_src;
 	struct buf_info	src[QCEDEV_MAX_BUFFERS];
@@ -131,52 +131,52 @@
 };
 
 /**
- * struct qcedev_cipher_op_req - Holds the ciphering request information
- * @use_pmem (IN):	Flag to indicate if buffer source is PMEM
- *			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
- * @pmem (IN):		Stores PMEM buffer information.
- *			Refer struct qcedev_pmem_info
- * @vbuf (IN/OUT):	Stores Source and destination Buffer information
- *			Refer to struct qcedev_vbuf_info
- * @data_len (IN):	Total Length of input/src and output/dst in bytes
- * @in_place_op (IN):	Indicates whether the operation is inplace where
- *			source == destination
- *			When using PMEM allocated memory, must set this to 1
- * @enckey (IN):		128 bits of confidentiality key
- *			enckey[0] bit 127-120, enckey[1] bit 119-112,..
- *			enckey[15] bit 7-0
- * @encklen (IN):	Length of the encryption key(set to 128  bits/16
- *			bytes in the driver)
- * @iv (IN/OUT):		Initialisation vector data
- *			This is updated by the driver, incremented by
- *			number of blocks encrypted/decrypted.
- * @ivlen (IN):		Length of the IV
- * @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
- *			for AES-128 CTR mode only)
- * @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
- * @mode (IN):		Mode use when using AES algorithm: ECB/CBC/CTR
- *			Apllicabel when using AES algorithm only
- * @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
- *			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
- *
- *If use_pmem is set to 0, the driver assumes that memory was not allocated
- * via PMEM, and kernel will need to allocate memory and copy data from user
- * space buffer (data_src/dta_dst) and process accordingly and copy data back
- * to the user space buffer
- *
- * If use_pmem is set to 1, the driver assumes that memory was allocated via
- * PMEM.
- * The kernel driver will use the fd_src to determine the kernel virtual address
- * base that maps to the user space virtual address base for the  buffer
- * allocated in user space.
- * The final input/src and output/dst buffer pointer will be determined
- * by adding the offsets to the kernel virtual addr.
- *
- * If use of hardware key is supported in the target, user can configure the
- * key parameters (encklen, enckey) to use the hardware key.
- * In order to use the hardware key, set encklen to 0 and set the enckey
- * data array to 0.
- */
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+*			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN):		Stores PMEM buffer information.
+*			Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT):	Stores Source and destination Buffer information
+*			Refer to struct qcedev_vbuf_info
+* @data_len (IN):	Total Length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is inplace where
+*			source == destination
+*			When using PMEM allocated memory, must set this to 1
+* @enckey (IN):		128 bits of confidentiality key
+*			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+*			enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+*			bytes in the driver)
+* @iv (IN/OUT):		Initialisation vector data
+*			This is updated by the driver, incremented by
+*			number of blocks encrypted/decrypted.
+* @ivlen (IN):		Length of the IV
+* @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+*			for AES-128 CTR mode only)
+* @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR.
+*			Applicable only when using the AES algorithm.
+* @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+*			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will allocate memory, copy the data in from the user
+* space buffers (data_src/data_dst), process it, and copy the result back
+* to the user space buffer.
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of hardware key is supported in the target, user can configure the
+* key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
 struct	qcedev_cipher_op_req {
 	uint8_t				use_pmem;
 	union {
@@ -197,16 +197,16 @@
 };
 
 /**
- * struct qcedev_sha_op_req - Holds the hashing request information
- * @data (IN):			Array of pointers to the data to be hashed
- * @entries (IN):		Number of buf_info entries in the data array
- * @data_len (IN):		Length of data to be hashed
- * @digest (IN/OUT):		Returns the hashed data information
- * @diglen (OUT):		Size of the hashed/digest data
- * @authkey (IN):		Pointer to authentication key for HMAC
- * @authklen (IN):		Size of the authentication key
- * @alg (IN):			Secure Hash algorithm
- */
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN):			Array of pointers to the data to be hashed
+* @entries (IN):		Number of buf_info entries in the data array
+* @data_len (IN):		Length of data to be hashed
+* @digest (IN/OUT):		Returns the hashed data information
+* @diglen (OUT):		Size of the hashed/digest data
+* @authkey (IN):		Pointer to authentication key for HMAC
+* @authklen (IN):		Size of the authentication key
+* @alg (IN):			Secure Hash algorithm
+*/
 struct	qcedev_sha_op_req {
 	struct buf_info			data[QCEDEV_MAX_BUFFERS];
 	uint32_t			entries;
@@ -219,20 +219,16 @@
 };
 
 /**
- * struct qfips_verify_t - Holds data for FIPS Integrity test
- * @kernel_size  (IN):		Size of kernel Image
- * @kernel       (IN):		pointer to buffer containing the kernel Image
- */
+* struct qfips_verify_t - Holds data for FIPS Integrity test
+* @kernel_size  (IN):		Size of kernel Image
+* @kernel       (IN):		pointer to buffer containing the kernel Image
+*/
 struct qfips_verify_t {
 	unsigned int kernel_size;
 	void *kernel;
 };
 
 struct file;
-/* temporiraly comment out for msm-4.9 headfile upgrade */
-/* extern long qcedev_ioctl(struct file *file,
- *                 unsigned int cmd, unsigned long arg);
- */
 
 #define QCEDEV_IOC_MAGIC	0x87
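
struct qcedev_cipher_op_req is filled entirely from userspace and handed to
the driver through an ioctl built on QCEDEV_IOC_MAGIC. A rough sketch of a
non-PMEM, in-place AES-CBC encrypt request; QCEDEV_IOCTL_ENC_REQ and the
/dev/qce node name are assumptions about definitions living outside this
hunk:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/qcedev.h>

	/* Sketch only: error handling trimmed, names per the assumptions
	 * noted above. */
	int example_aes_cbc_encrypt(uint8_t *buf, uint32_t len,
				    uint8_t key[16], uint8_t iv[16])
	{
		struct qcedev_cipher_op_req req;
		int fd = open("/dev/qce", O_RDWR);	/* assumed node */

		memset(&req, 0, sizeof(req));
		req.use_pmem = 0;			/* plain user buffers */
		req.vbuf.src[0].vaddr = buf;
		req.vbuf.src[0].len = len;
		req.vbuf.dst[0].vaddr = buf;
		req.vbuf.dst[0].len = len;
		req.entries = 1;
		req.in_place_op = 1;			/* src == dst */
		req.data_len = len;
		memcpy(req.enckey, key, 16);
		req.encklen = 16;
		memcpy(req.iv, iv, 16);
		req.ivlen = 16;
		req.alg = QCEDEV_ALG_AES;
		req.mode = QCEDEV_AES_MODE_CBC;
		req.op = QCEDEV_OPER_ENC;

		return ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
	}
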
 
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
index b29072e..94e9b00 100644
--- a/include/uapi/linux/qseecom.h
+++ b/include/uapi/linux/qseecom.h
@@ -277,10 +277,6 @@
 
 struct file;
 
-/* temporiraly comment out for msm-4.9 headfile upgrade */
-/* extern long qseecom_ioctl(struct file *file,
- *             unsigned int cmd, unsigned long arg);
- */
 
 #define QSEECOM_IOC_MAGIC    0x97
 
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 3e2b24c..fea6a70 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -20,6 +20,7 @@
 #define CAM_CPAS_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 7)
 #define CAM_CSIPHY_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 8)
 #define CAM_ACTUATOR_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 9)
+#define CAM_CCI_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 10)
 
 /* cam_req_mgr hdl info */
 #define CAM_REQ_MGR_HDL_IDX_POS           8
diff --git a/include/uapi/media/msm_media_info.h b/include/uapi/media/msm_media_info.h
index 883c9be..be87b1e 100644
--- a/include/uapi/media/msm_media_info.h
+++ b/include/uapi/media/msm_media_info.h
@@ -1075,7 +1075,7 @@
 		alignment = 128;
 		break;
 	case COLOR_FMT_RGB565_UBWC:
-		alignment = 128;
+		alignment = 256;
 		bpp = 2;
 		break;
 	case COLOR_FMT_RGBA8888_UBWC:
@@ -1279,6 +1279,8 @@
 		size =  MSM_MEDIA_ALIGN(size, 4096);
 		break;
 	case COLOR_FMT_RGBA8888_UBWC:
+	case COLOR_FMT_RGBA1010102_UBWC:
+	case COLOR_FMT_RGB565_UBWC:
 		rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
 							4096);
 		rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
diff --git a/include/uapi/scsi/Kbuild b/include/uapi/scsi/Kbuild
index fad00e0..9a0da84 100644
--- a/include/uapi/scsi/Kbuild
+++ b/include/uapi/scsi/Kbuild
@@ -2,6 +2,8 @@
 header-y += fc/
 header-y += ufs/
 header-y += scsi_bsg_fc.h
+header-y += sg.h
+header-y += scsi_ioctl.h
 header-y += scsi_netlink.h
 header-y += scsi_netlink_fc.h
 header-y += cxlflash_ioctl.h
diff --git a/include/scsi/scsi_ioctl.h b/include/uapi/scsi/scsi_ioctl.h
similarity index 97%
rename from include/scsi/scsi_ioctl.h
rename to include/uapi/scsi/scsi_ioctl.h
index 8d19d1d..516c581a 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/uapi/scsi/scsi_ioctl.h
@@ -1,6 +1,8 @@
 #ifndef _SCSI_IOCTL_H
 #define _SCSI_IOCTL_H 
 
+#include <linux/types.h>
+
 #define SCSI_IOCTL_SEND_COMMAND 1
 #define SCSI_IOCTL_TEST_UNIT_READY 2
 #define SCSI_IOCTL_BENCHMARK_COMMAND 3
diff --git a/include/scsi/sg.h b/include/uapi/scsi/sg.h
similarity index 99%
rename from include/scsi/sg.h
rename to include/uapi/scsi/sg.h
index 3afec70..08d3beb 100644
--- a/include/scsi/sg.h
+++ b/include/uapi/scsi/sg.h
@@ -2,6 +2,7 @@
 #define _SCSI_GENERIC_H
 
 #include <linux/compiler.h>
+#include <linux/param.h>
 
 /*
  * History:
@@ -209,7 +210,6 @@
 
 /* Alternate style type names, "..._t" variants preferred */
 typedef struct sg_io_hdr Sg_io_hdr;
-typedef struct sg_io_vec Sg_io_vec;
 typedef struct sg_scsi_id Sg_scsi_id;
 typedef struct sg_req_info Sg_req_info;
 
diff --git a/kernel/configs/android-base.config b/kernel/configs/android-base.config
index 4732628..30e0107 100644
--- a/kernel/configs/android-base.config
+++ b/kernel/configs/android-base.config
@@ -1,11 +1,14 @@
 #  KEEP ALPHABETICALLY SORTED
 # CONFIG_DEVKMEM is not set
 # CONFIG_DEVMEM is not set
+# CONFIG_FHANDLE is not set
 # CONFIG_INET_LRO is not set
 # CONFIG_MODULES is not set
 # CONFIG_OABI_COMPAT is not set
 # CONFIG_SYSVIPC is not set
+# CONFIG_USELIB is not set
 CONFIG_ANDROID=y
+CONFIG_ANDROID_BINDER_DEVICES=binder,hwbinder,vndbinder
 CONFIG_ANDROID_BINDER_IPC=y
 CONFIG_ANDROID_LOW_MEMORY_KILLER=y
 CONFIG_ARMV8_DEPRECATED=y
@@ -23,7 +26,10 @@
 CONFIG_FB=y
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
 CONFIG_INET6_AH=y
+CONFIG_INET6_DIAG_DESTROY=y
 CONFIG_INET6_ESP=y
 CONFIG_INET6_IPCOMP=y
 CONFIG_INET=y
@@ -60,6 +66,9 @@
 CONFIG_IP_NF_TARGET_NETMAP=y
 CONFIG_IP_NF_TARGET_REDIRECT=y
 CONFIG_IP_NF_TARGET_REJECT=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_MODVERSIONS=y
 CONFIG_NET=y
 CONFIG_NETDEVICES=y
 CONFIG_NETFILTER=y
@@ -137,9 +146,9 @@
 CONFIG_PROFILING=y
 CONFIG_QFMT_V2=y
 CONFIG_QUOTA=y
+CONFIG_QUOTACTL=y
 CONFIG_QUOTA_NETLINK_INTERFACE=y
 CONFIG_QUOTA_TREE=y
-CONFIG_QUOTACTL=y
 CONFIG_RANDOMIZE_BASE=y
 CONFIG_RTC_CLASS=y
 CONFIG_RT_GROUP_SCHED=y
@@ -153,16 +162,16 @@
 CONFIG_SWP_EMULATION=y
 CONFIG_SYNC=y
 CONFIG_TUN=y
-CONFIG_UID_CPUTIME=y
+CONFIG_UID_SYS_STATS=y
 CONFIG_UNIX=y
-CONFIG_USB_GADGET=y
 CONFIG_USB_CONFIGFS=y
-CONFIG_USB_CONFIGFS_F_FS=y
-CONFIG_USB_CONFIGFS_F_MTP=y
-CONFIG_USB_CONFIGFS_F_PTP=y
 CONFIG_USB_CONFIGFS_F_ACC=y
 CONFIG_USB_CONFIGFS_F_AUDIO_SRC=y
-CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_FS=y
 CONFIG_USB_CONFIGFS_F_MIDI=y
+CONFIG_USB_CONFIGFS_F_MTP=y
+CONFIG_USB_CONFIGFS_F_PTP=y
+CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_GADGET=y
 CONFIG_USB_OTG_WAKELOCK=y
 CONFIG_XFRM_USER=y
diff --git a/kernel/configs/android-recommended.config b/kernel/configs/android-recommended.config
index abec2ca..36ec6c1 100644
--- a/kernel/configs/android-recommended.config
+++ b/kernel/configs/android-recommended.config
@@ -14,6 +14,7 @@
 CONFIG_BLK_DEV_RAM_SIZE=8192
 CONFIG_CC_STACKPROTECTOR_STRONG=y
 CONFIG_COMPACTION=y
+CONFIG_CPU_SW_DOMAIN_PAN=y
 CONFIG_DEBUG_RODATA=y
 CONFIG_DM_CRYPT=y
 CONFIG_DM_UEVENT=y
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 7c23144..69df75d 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -1060,9 +1060,37 @@
 	return ret;
 }
 
+static int switch_to_rt_policy(void)
+{
+	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+	unsigned int policy = current->policy;
+	int err;
+
+	/* Nobody should be attempting hotplug from these policy contexts. */
+	if (policy == SCHED_BATCH || policy == SCHED_IDLE ||
+					policy == SCHED_DEADLINE)
+		return -EPERM;
+
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		return 1;
+
+	/* Only SCHED_NORMAL left. */
+	err = sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
+	return err;
+}
+
+static int switch_to_fair_policy(void)
+{
+	struct sched_param param = { .sched_priority = 0 };
+
+	return sched_setscheduler_nocheck(current, SCHED_NORMAL, &param);
+}
+
 static int do_cpu_up(unsigned int cpu, enum cpuhp_state target)
 {
 	int err = 0;
+	int switch_err = 0;
 
 	if (!cpu_possible(cpu)) {
 		pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
@@ -1073,6 +1101,10 @@
 		return -EINVAL;
 	}
 
+	switch_err = switch_to_rt_policy();
+	if (switch_err < 0)
+		return switch_err;
+
 	err = try_online_node(cpu_to_node(cpu));
 	if (err)
 		return err;
@@ -1087,6 +1119,13 @@
 	err = _cpu_up(cpu, 0, target);
 out:
 	cpu_maps_update_done();
+
+	if (!switch_err) {
+		switch_err = switch_to_fair_policy();
+		if (switch_err)
+			pr_err("Hotplug policy switch err. Task %s pid=%d\n", current->comm, current->pid);
+	}
+
 	return err;
 }
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 113d325..41f376d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -10949,6 +10949,7 @@
 }
 device_initcall(perf_event_sysfs_init);
 
+#ifdef CONFIG_HOTPLUG_CPU
 static int perf_cpu_hp_init(void)
 {
 	int ret;
@@ -10963,6 +10964,7 @@
 	return ret;
 }
 subsys_initcall(perf_cpu_hp_init);
+#endif
 
 #ifdef CONFIG_CGROUP_PERF
 static struct cgroup_subsys_state *
diff --git a/kernel/padata.c b/kernel/padata.c
index 7848f05..b4a3c0a 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -190,19 +190,20 @@
 
 	reorder = &next_queue->reorder;
 
+	spin_lock(&reorder->lock);
 	if (!list_empty(&reorder->list)) {
 		padata = list_entry(reorder->list.next,
 				    struct padata_priv, list);
 
-		spin_lock(&reorder->lock);
 		list_del_init(&padata->list);
 		atomic_dec(&pd->reorder_objects);
-		spin_unlock(&reorder->lock);
 
 		pd->processed++;
 
+		spin_unlock(&reorder->lock);
 		goto out;
 	}
+	spin_unlock(&reorder->lock);
 
 	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
 		padata = ERR_PTR(-ENODATA);
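
The fix above widens the reorder lock so the emptiness test and the dequeue
are one atomic step; previously another CPU could empty the list between
list_empty() and list_entry(). The same shape, reduced to a self-contained
sketch with hypothetical types:

	#include <linux/list.h>
	#include <linux/spinlock.h>

	struct example_queue {		/* stand-in for the padata list */
		struct list_head list;
		spinlock_t lock;
	};

	struct example_item {
		struct list_head node;
	};

	/* Check and dequeue under one lock section, as in the fix. */
	static struct example_item *example_pop(struct example_queue *q)
	{
		struct example_item *item = NULL;

		spin_lock(&q->lock);
		if (!list_empty(&q->list)) {
			item = list_first_entry(&q->list,
						struct example_item, node);
			list_del_init(&item->node);
		}
		spin_unlock(&q->lock);

		return item;
	}
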
diff --git a/lib/syscall.c b/lib/syscall.c
index 63239e0..a72cd09 100644
--- a/lib/syscall.c
+++ b/lib/syscall.c
@@ -11,6 +11,7 @@
 
 	if (!try_get_task_stack(target)) {
 		/* Task has no stack, so the task isn't in a syscall. */
+		*sp = *pc = 0;
 		*callno = -1;
 		return 0;
 	}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index b6adedb..65c36ac 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4471,6 +4471,7 @@
 {
 	struct page *page = NULL;
 	spinlock_t *ptl;
+	pte_t pte;
 retry:
 	ptl = pmd_lockptr(mm, pmd);
 	spin_lock(ptl);
@@ -4480,12 +4481,13 @@
 	 */
 	if (!pmd_huge(*pmd))
 		goto out;
-	if (pmd_present(*pmd)) {
+	pte = huge_ptep_get((pte_t *)pmd);
+	if (pte_present(pte)) {
 		page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
 		if (flags & FOLL_GET)
 			get_page(page);
 	} else {
-		if (is_hugetlb_entry_migration(huge_ptep_get((pte_t *)pmd))) {
+		if (is_hugetlb_entry_migration(pte)) {
 			spin_unlock(ptl);
 			__migration_entry_wait(mm, (pte_t *)pmd, ptl);
 			goto retry;
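
The hugetlb change is a snapshot fix: the entry is read once with
huge_ptep_get() and both decisions are made on that copy, so a concurrent
migration cannot flip the entry between the present check and the
migration-entry check. The pattern in isolation (a sketch only;
is_hugetlb_entry_migration() is file-local to mm/hugetlb.c, so this is not
buildable module code):

	#include <linux/hugetlb.h>

	static bool example_huge_entry_is_migrating(pmd_t *pmd)
	{
		pte_t entry = huge_ptep_get((pte_t *)pmd);

		if (pte_present(entry))
			return false;	/* stable, mapped entry */

		return is_hugetlb_entry_migration(entry);
	}
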
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8e82002..f61724f4f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2058,8 +2058,12 @@
  * potentially hurts the reliability of high-order allocations when under
  * intense memory pressure but failed atomic allocations should be easier
  * to recover from than an OOM.
+ *
+ * If @force is true, try to unreserve pageblocks even if this would
+ * exhaust the highatomic reserve entirely.
  */
-static void unreserve_highatomic_pageblock(const struct alloc_context *ac)
+static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
+						bool force)
 {
 	struct zonelist *zonelist = ac->zonelist;
 	unsigned long flags;
@@ -2067,11 +2071,16 @@
 	struct zone *zone;
 	struct page *page;
 	int order;
+	bool ret;
 
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, ac->high_zoneidx,
 								ac->nodemask) {
-		/* Preserve at least one pageblock */
-		if (zone->nr_reserved_highatomic <= pageblock_nr_pages)
+		/*
+		 * Preserve at least one pageblock unless memory pressure
+		 * is really high.
+		 */
+		if (!force && zone->nr_reserved_highatomic <=
+					pageblock_nr_pages)
 			continue;
 
 		spin_lock_irqsave(&zone->lock, flags);
@@ -2085,13 +2094,25 @@
 				continue;
 
 			/*
-			 * It should never happen but changes to locking could
-			 * inadvertently allow a per-cpu drain to add pages
-			 * to MIGRATE_HIGHATOMIC while unreserving so be safe
-			 * and watch for underflows.
+			 * In page freeing path, migratetype change is racy so
+			 * In the page freeing path, the migratetype change is
+			 * racy, so we can encounter several free pages in a
+			 * pageblock in this loop although we changed the
+			 * pageblock type from highatomic to ac->migratetype.
+			 * So we should adjust the count only once.
-			zone->nr_reserved_highatomic -= min(pageblock_nr_pages,
-				zone->nr_reserved_highatomic);
+			if (get_pageblock_migratetype(page) ==
+							MIGRATE_HIGHATOMIC) {
+				/*
+				 * It should never happen but changes to
+				 * locking could inadvertently allow a per-cpu
+				 * drain to add pages to MIGRATE_HIGHATOMIC
+				 * while unreserving so be safe and watch for
+				 * underflows.
+				 */
+				zone->nr_reserved_highatomic -= min(
+						pageblock_nr_pages,
+						zone->nr_reserved_highatomic);
+			}
 
 			/*
 			 * Convert to ac->migratetype and avoid the normal
@@ -2103,12 +2124,16 @@
 			 * may increase.
 			 */
 			set_pageblock_migratetype(page, ac->migratetype);
-			move_freepages_block(zone, page, ac->migratetype);
-			spin_unlock_irqrestore(&zone->lock, flags);
-			return;
+			ret = move_freepages_block(zone, page, ac->migratetype);
+			if (ret) {
+				spin_unlock_irqrestore(&zone->lock, flags);
+				return ret;
+			}
 		}
 		spin_unlock_irqrestore(&zone->lock, flags);
 	}
+
+	return false;
 }
 
 /* Remove an element from the buddy allocator from the fallback list */
@@ -2133,7 +2158,8 @@
 
 		page = list_first_entry(&area->free_list[fallback_mt],
 						struct page, lru);
-		if (can_steal)
+		if (can_steal &&
+			get_pageblock_migratetype(page) != MIGRATE_HIGHATOMIC)
 			steal_suitable_fallback(zone, page, start_migratetype);
 
 		/* Remove the page from the freelists */
@@ -2542,7 +2568,8 @@
 		struct page *endpage = page + (1 << order) - 1;
 		for (; page < endpage; page += pageblock_nr_pages) {
 			int mt = get_pageblock_migratetype(page);
-			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt))
+			if (!is_migrate_isolate(mt) && !is_migrate_cma(mt)
+				&& mt != MIGRATE_HIGHATOMIC)
 				set_pageblock_migratetype(page,
 							  MIGRATE_MOVABLE);
 		}
@@ -3313,7 +3340,7 @@
 	 * Shrink them them and try again
 	 */
 	if (!page && !drained) {
-		unreserve_highatomic_pageblock(ac);
+		unreserve_highatomic_pageblock(ac, false);
 		drain_all_pages(NULL);
 		drained = true;
 		goto retry;
@@ -3430,8 +3457,10 @@
 	 * Make sure we converge to OOM if we cannot make any progress
 	 * several times in the row.
 	 */
-	if (*no_progress_loops > MAX_RECLAIM_RETRIES)
-		return false;
+	if (*no_progress_loops > MAX_RECLAIM_RETRIES) {
+		/* Before OOM, exhaust highatomic_reserve */
+		return unreserve_highatomic_pageblock(ac, true);
+	}
 
 	/*
 	 * Keep reclaiming pages while there is a chance this will lead
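
Taken together, the page_alloc hunks tear the highatomic reserve down in
two stages: a gentle pass (force == false) that always preserves one
pageblock, and a last-resort pass (force == true) whose boolean result
decides whether to retry reclaim instead of declaring OOM. A sketch of the
resulting contract (both helpers are static to mm/page_alloc.c, so this
illustrates the call shape rather than a buildable caller):

	/* Assumes the names from the hunks above. */
	static inline bool example_retry_before_oom(struct alloc_context *ac,
						    int no_progress_loops)
	{
		if (no_progress_loops <= MAX_RECLAIM_RETRIES)
			return true;	/* keep reclaiming normally */

		/* Last resort: drain the reserve completely and retry
		 * only if pages were actually moved back. */
		return unreserve_highatomic_pageblock(ac, true);
	}
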
diff --git a/mm/rmap.c b/mm/rmap.c
index 1ef3640..cd37c1c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1295,7 +1295,7 @@
 			goto out;
 	}
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, nr);
-	mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, nr);
 out:
 	unlock_page_memcg(page);
 }
@@ -1335,7 +1335,7 @@
 	 * pte lock(a spinlock) is held, which implies preemption disabled.
 	 */
 	__mod_node_page_state(page_pgdat(page), NR_FILE_MAPPED, -nr);
-	mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
+	mem_cgroup_update_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, -nr);
 
 	if (unlikely(PageMlocked(page)))
 		clear_page_mlock(page);
diff --git a/mm/workingset.c b/mm/workingset.c
index 33f6f4d..4c4f056 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -492,7 +492,7 @@
 	pr_info("workingset: timestamp_bits=%d max_order=%d bucket_order=%u\n",
 	       timestamp_bits, max_order, bucket_order);
 
-	ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key);
+	ret = __list_lru_init(&workingset_shadow_nodes, true, &shadow_nodes_key);
 	if (ret)
 		goto err;
 	ret = register_shrinker(&workingset_shadow_shrinker);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 2efb335..25a30be 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -7,6 +7,7 @@
 #include <linux/kthread.h>
 #include <linux/net.h>
 #include <linux/nsproxy.h>
+#include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/socket.h>
 #include <linux/string.h>
@@ -469,11 +470,16 @@
 {
 	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
 	struct socket *sock;
+	unsigned int noio_flag;
 	int ret;
 
 	BUG_ON(con->sock);
+
+	/* sock_create_kern() allocates with GFP_KERNEL */
+	noio_flag = memalloc_noio_save();
 	ret = sock_create_kern(read_pnet(&con->msgr->net), paddr->ss_family,
 			       SOCK_STREAM, IPPROTO_TCP, &sock);
+	memalloc_noio_restore(noio_flag);
 	if (ret)
 		return ret;
 	sock->sk->sk_allocation = GFP_NOFS;
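
The ceph change scopes an implicit GFP_NOIO over sock_create_kern(), which
hardcodes GFP_KERNEL internally and cannot take gfp flags. The general
pattern, as a self-contained sketch (the allocating callee is hypothetical):

	#include <linux/sched.h>

	static int example_do_allocating_call(void)
	{
		return 0;	/* stands in for e.g. sock_create_kern() */
	}

	static int example_noio_section(void)
	{
		unsigned int noio_flag;
		int err;

		noio_flag = memalloc_noio_save();
		/* Allocations in here behave as GFP_NOIO, preventing the
		 * allocator from recursing into I/O while a connection is
		 * being (re)established. */
		err = example_do_allocating_call();
		memalloc_noio_restore(noio_flag);

		return err;
	}
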
diff --git a/sound/core/seq/seq_fifo.c b/sound/core/seq/seq_fifo.c
index 3f4efcb..3490d21 100644
--- a/sound/core/seq/seq_fifo.c
+++ b/sound/core/seq/seq_fifo.c
@@ -265,6 +265,10 @@
 	/* NOTE: overflow flag is not cleared */
 	spin_unlock_irqrestore(&f->lock, flags);
 
+	/* close the old pool and wait until all users are gone */
+	snd_seq_pool_mark_closing(oldpool);
+	snd_use_lock_sync(&f->use_lock);
+
 	/* release cells in old pool */
 	for (cell = oldhead; cell; cell = next) {
 		next = cell->next;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 112caa2..bb1aad3 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4846,6 +4846,7 @@
 	ALC292_FIXUP_DISABLE_AAMIX,
 	ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK,
 	ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+	ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
 	ALC275_FIXUP_DELL_XPS,
 	ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
 	ALC293_FIXUP_LENOVO_SPK_NOISE,
@@ -5446,6 +5447,15 @@
 		.chained = true,
 		.chain_id = ALC269_FIXUP_HEADSET_MODE
 	},
+	[ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE] = {
+		.type = HDA_FIXUP_PINS,
+		.v.pins = (const struct hda_pintbl[]) {
+			{ 0x18, 0x01a1913c }, /* use as headset mic, without its own jack detect */
+			{ }
+		},
+		.chained = true,
+		.chain_id = ALC269_FIXUP_HEADSET_MODE
+	},
 	[ALC275_FIXUP_DELL_XPS] = {
 		.type = HDA_FIXUP_VERBS,
 		.v.verbs = (const struct hda_verb[]) {
@@ -5518,7 +5528,7 @@
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc298_fixup_speaker_volume,
 		.chained = true,
-		.chain_id = ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
+		.chain_id = ALC298_FIXUP_DELL_AIO_MIC_NO_PRESENCE,
 	},
 	[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
 		.type = HDA_FIXUP_PINS,
diff --git a/sound/soc/atmel/atmel-classd.c b/sound/soc/atmel/atmel-classd.c
index 89ac5f5..7ae46c2 100644
--- a/sound/soc/atmel/atmel-classd.c
+++ b/sound/soc/atmel/atmel-classd.c
@@ -349,7 +349,7 @@
 }
 
 #define CLASSD_ACLK_RATE_11M2896_MPY_8 (112896 * 100 * 8)
-#define CLASSD_ACLK_RATE_12M288_MPY_8  (12228 * 1000 * 8)
+#define CLASSD_ACLK_RATE_12M288_MPY_8  (12288 * 1000 * 8)
 
 static struct {
 	int rate;
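
The atmel-classd fix is pure arithmetic: the ACLK constant must be eight
times the 12.288 MHz master clock, and the old 12228 literal was a typo. A
compile-time check makes that visible (the assert is illustrative, not part
of the driver):

	/* 12288 * 1000 * 8 = 98304000, i.e. 8 x 12.288 MHz. */
	_Static_assert(CLASSD_ACLK_RATE_12M288_MPY_8 == 98304000,
		       "ACLK must be eight times the 12.288 MHz MCLK");
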
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index b4867ff..9c6f471 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -888,6 +888,8 @@
 config SND_SOC_WCD9335
         tristate
 	depends on WCD9335_CODEC
+	select SND_SOC_WCD_MBHC
+	select SND_SOC_WCD_MBHC_LEGACY
 
 config SND_SOC_WCD934X
         tristate
@@ -903,6 +905,7 @@
         tristate
 	depends on SND_SOC_WCD934X
 	select SND_SOC_WCD_MBHC
+	select SND_SOC_WCD_MBHC_ADC
 
 config SND_SOC_WSA881X
         tristate
@@ -931,8 +934,6 @@
 
 config SND_SOC_WCD_MBHC
 	tristate
-	default y if (SND_SOC_MSM8909_WCD=y || SND_SOC_SDM660_CDC=y || SND_SOC_WCD9335=y) && SND_SOC_MDMCALIFORNIUM!=y
-	select SND_SOC_WCD_MBHC_LEGACY
 
 config SND_SOC_WCD_MBHC_LEGACY
 	tristate
diff --git a/sound/soc/codecs/sdm660_cdc/Kconfig b/sound/soc/codecs/sdm660_cdc/Kconfig
index d370da3..2f36c39 100644
--- a/sound/soc/codecs/sdm660_cdc/Kconfig
+++ b/sound/soc/codecs/sdm660_cdc/Kconfig
@@ -1,3 +1,5 @@
 
 config SND_SOC_SDM660_CDC
 	tristate "MSM Internal PMIC based codec"
+	select SND_SOC_WCD_MBHC
+	select SND_SOC_WCD_MBHC_LEGACY
diff --git a/sound/soc/codecs/wcd-mbhc-adc.c b/sound/soc/codecs/wcd-mbhc-adc.c
index 2c7d667..7278431 100644
--- a/sound/soc/codecs/wcd-mbhc-adc.c
+++ b/sound/soc/codecs/wcd-mbhc-adc.c
@@ -354,7 +354,7 @@
 {
 	bool spl_hs = false;
 	int output_mv = 0;
-	int adc_threshold = 0;
+	int adc_threshold = 0, adc_hph_threshold = 0;
 
 	pr_debug("%s: enter\n", __func__);
 	if (!mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
@@ -372,8 +372,11 @@
 	output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
 	adc_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
 			  wcd_mbhc_get_micbias(mbhc))/WCD_MBHC_ADC_MICBIAS_MV);
+	adc_hph_threshold = ((WCD_MBHC_ADC_HPH_THRESHOLD_MV *
+			      wcd_mbhc_get_micbias(mbhc))/
+			      WCD_MBHC_ADC_MICBIAS_MV);
 
-	if (output_mv > adc_threshold) {
+	if (output_mv > adc_threshold || output_mv < adc_hph_threshold) {
 		spl_hs = false;
 	} else {
 		spl_hs = true;
@@ -382,15 +385,16 @@
 	}
 
 	/* MB2 back to 1.8v if the type is not special headset */
-	if (!spl_hs) {
+	if (spl_hs_cnt && (*spl_hs_cnt != WCD_MBHC_SPL_HS_CNT)) {
 		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
 				mbhc->mbhc_cfg->mbhc_micbias, false);
 		/* Add 10ms delay for micbias to settle */
 		usleep_range(10000, 10100);
-	} else {
-		pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
 	}
 
+	if (spl_hs)
+		pr_debug("%s: Detected special HS (%d)\n", __func__, spl_hs);
+
 exit:
 	pr_debug("%s: leave\n", __func__);
 	return spl_hs;
@@ -401,28 +405,63 @@
 	int delay = 0;
 	bool ret = false;
 	bool is_spl_hs = false;
-	int spl_hs_count = 0;
+	int output_mv = 0;
+	int adc_threshold = 0;
+
+	/*
+	 * Increase micbias to 2.7V to detect headsets with
+	 * threshold on microphone
+	 */
+	if (mbhc->mbhc_cb->mbhc_micbias_control &&
+	    !mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+		pr_debug("%s: callback fn micb_ctrl_thr_mic not defined\n",
+			 __func__);
+		return false;
+	} else if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic) {
+		ret = mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec,
+							MIC_BIAS_2, true);
+		if (ret) {
+			pr_err("%s: mbhc_micb_ctrl_thr_mic failed, ret: %d\n",
+				__func__, ret);
+			return false;
+		}
+	}
+
+	adc_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
+			  wcd_mbhc_get_micbias(mbhc)) /
+			  WCD_MBHC_ADC_MICBIAS_MV);
 
 	while (!is_spl_hs) {
-		delay += 50;
 		if (mbhc->hs_detect_work_stop) {
 			pr_debug("%s: stop requested: %d\n", __func__,
 					mbhc->hs_detect_work_stop);
 			break;
 		}
+		delay += 50;
 		/* Wait for 50ms for FSM to update result */
 		msleep(50);
-		is_spl_hs = wcd_mbhc_adc_check_for_spl_headset(mbhc,
-							       &spl_hs_count);
-		if (is_spl_hs)
-			pr_debug("%s: Spl headset detected in %d msecs\n",
+		output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+		if (output_mv <= adc_threshold) {
+			pr_debug("%s: Special headset detected in %d msecs\n",
 					__func__, delay);
+			is_spl_hs = true;
+		}
+
 		if (delay == SPECIAL_HS_DETECT_TIME_MS) {
 			pr_debug("%s: Spl headset not found in 2 sec\n",
 				 __func__);
 			break;
 		}
 	}
+	if (is_spl_hs) {
+		pr_debug("%s: Headset with threshold found\n", __func__);
+		mbhc->micbias_enable = true;
+		ret = true;
+	}
+	if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic &&
+	    !mbhc->micbias_enable)
+		mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(mbhc->codec, MIC_BIAS_2,
+						      false);
 	pr_debug("%s: leave, micb_enable: %d\n", __func__,
 		  mbhc->micbias_enable);
 
@@ -533,19 +572,6 @@
 	return plug_type;
 }
 
-static int wcd_mbhc_get_plug_type(struct wcd_mbhc *mbhc)
-{
-	int result_mv = 0;
-
-	/*
-	 * Use ADC single mode to minimize the chance of missing out
-	 * btn press/release for HEADSET type during correct work.
-	 */
-	result_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
-
-	return wcd_mbhc_get_plug_from_adc(result_mv);
-}
-
 static void wcd_correct_swch_plug(struct work_struct *work)
 {
 	struct wcd_mbhc *mbhc;
@@ -553,8 +579,9 @@
 	enum wcd_mbhc_plug_type plug_type = MBHC_PLUG_TYPE_INVALID;
 	unsigned long timeout;
 	bool wrk_complete = false;
-	int gnd_mic_swap_cnt = 0;
-	bool is_pa_on = false, spl_hs = false;
+	int pt_gnd_mic_swap_cnt = 0;
+	int no_gnd_mic_swap_cnt = 0;
+	bool is_pa_on = false, spl_hs = false, spl_hs_reported = false;
 	int ret = 0;
 	int spl_hs_count = 0;
 	int output_mv = 0;
@@ -647,8 +674,11 @@
 			spl_hs = wcd_mbhc_adc_check_for_spl_headset(mbhc,
 								&spl_hs_count);
 
-			if (spl_hs_count == WCD_MBHC_SPL_HS_CNT)
+			if (spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+				output_mv = WCD_MBHC_ADC_HS_THRESHOLD_MV;
+				spl_hs = true;
 				mbhc->micbias_enable = true;
+			}
 		}
 
 		if (mbhc->mbhc_cb->hph_pa_on_status)
@@ -660,9 +690,14 @@
 			ret = wcd_check_cross_conn(mbhc);
 			if (ret < 0)
 				continue;
-			if (ret > 0) {
-				/* Found cross connection, swap mic/gnd */
-				if (gnd_mic_swap_cnt > GND_MIC_SWAP_THRESHOLD) {
+			else if (ret > 0) {
+				pt_gnd_mic_swap_cnt++;
+				no_gnd_mic_swap_cnt = 0;
+				if (pt_gnd_mic_swap_cnt <
+						GND_MIC_SWAP_THRESHOLD) {
+					continue;
+				} else if (pt_gnd_mic_swap_cnt >
+					   GND_MIC_SWAP_THRESHOLD) {
 					/*
 					 * This is due to GND/MIC switch didn't
 					 * work,  Report unsupported plug.
@@ -671,37 +706,57 @@
 						 __func__);
 					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
 					goto report;
+				} else {
+					plug_type = MBHC_PLUG_TYPE_GND_MIC_SWAP;
 				}
-				gnd_mic_swap_cnt++;
+			} else {
+				no_gnd_mic_swap_cnt++;
+				pt_gnd_mic_swap_cnt = 0;
+				plug_type = wcd_mbhc_get_plug_from_adc(
+						output_mv);
+				if ((no_gnd_mic_swap_cnt <
+				    GND_MIC_SWAP_THRESHOLD) &&
+				    (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
+					continue;
+				} else {
+					no_gnd_mic_swap_cnt = 0;
+				}
+			}
+			if ((pt_gnd_mic_swap_cnt == GND_MIC_SWAP_THRESHOLD) &&
+				(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
+				/*
+				 * if switch is toggled, check again,
+				 * otherwise report unsupported plug
+				 */
 				if (mbhc->mbhc_cfg->swap_gnd_mic &&
 					mbhc->mbhc_cfg->swap_gnd_mic(codec)) {
 					pr_debug("%s: US_EU gpio present,flip switch\n"
 						, __func__);
 					continue;
 				}
-			} else {
-				gnd_mic_swap_cnt++;
-				plug_type = wcd_mbhc_get_plug_type(mbhc);
-				if ((gnd_mic_swap_cnt <=
-				    GND_MIC_SWAP_THRESHOLD) &&
-				    (spl_hs_count != WCD_MBHC_SPL_HS_CNT)) {
-					continue;
-				} else {
-					gnd_mic_swap_cnt = 0;
-				}
 			}
 		}
 
-		if (!spl_hs && (plug_type == MBHC_PLUG_TYPE_HIGH_HPH)) {
+		if (output_mv > WCD_MBHC_ADC_HS_THRESHOLD_MV) {
 			pr_debug("%s: cable is extension cable\n", __func__);
+			plug_type = MBHC_PLUG_TYPE_HIGH_HPH;
 			wrk_complete = true;
 		} else {
+			pr_debug("%s: cable might be headset: %d\n", __func__,
+				 plug_type);
 			if (plug_type != MBHC_PLUG_TYPE_GND_MIC_SWAP) {
-				if (!spl_hs)
-					plug_type =
-						wcd_mbhc_get_plug_type(mbhc);
-				else
-					plug_type = MBHC_PLUG_TYPE_HEADSET;
+				plug_type = wcd_mbhc_get_plug_from_adc(
+						output_mv);
+				if (!spl_hs_reported &&
+				    spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+					spl_hs_reported = true;
+					WCD_MBHC_RSC_LOCK(mbhc);
+					wcd_mbhc_find_plug_and_report(mbhc,
+								    plug_type);
+					WCD_MBHC_RSC_UNLOCK(mbhc);
+					continue;
+				} else if (spl_hs_reported)
+					continue;
 				/*
 				 * Report headset only if not already reported
 				 * and if there is not button press without
@@ -710,12 +765,13 @@
 				if ((mbhc->current_plug !=
 				      MBHC_PLUG_TYPE_HEADSET) &&
 				     (mbhc->current_plug !=
-				     MBHC_PLUG_TYPE_ANC_HEADPHONE)) {
-					if (plug_type == MBHC_PLUG_TYPE_HEADSET)
-						pr_debug("%s: cable is %s headset\n",
-							__func__,
-							((spl_hs) ?
-							 "special ":""));
+				     MBHC_PLUG_TYPE_ANC_HEADPHONE) &&
+				    !wcd_swch_level_remove(mbhc)) {
+					pr_debug("%s: cable is %s headset\n",
+						 __func__,
+						((spl_hs_count ==
+							WCD_MBHC_SPL_HS_CNT) ?
+							"special ":""));
 					goto report;
 				}
 			}
@@ -782,6 +838,29 @@
 	    !mbhc->micbias_enable)
 		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
 						    MICB_DISABLE);
+
+	/*
+	 * If plug type is corrected from special headset to headphone,
+	 * clear the micbias enable flag, set micbias back to 1.8V and
+	 * disable micbias.
+	 */
+	if (plug_type == MBHC_PLUG_TYPE_HEADPHONE &&
+	    mbhc->micbias_enable) {
+		if (mbhc->mbhc_cb->mbhc_micbias_control)
+			mbhc->mbhc_cb->mbhc_micbias_control(
+					codec, MIC_BIAS_2,
+					MICB_DISABLE);
+		if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+			mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+					codec,
+					MIC_BIAS_2, false);
+		if (mbhc->mbhc_cb->set_micbias_value) {
+			mbhc->mbhc_cb->set_micbias_value(codec);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+		}
+		mbhc->micbias_enable = false;
+	}
+
 	if (mbhc->mbhc_cfg->detect_extn_cable &&
 	    ((plug_type == MBHC_PLUG_TYPE_HEADPHONE) ||
 	     (plug_type == MBHC_PLUG_TYPE_HEADSET)) &&
@@ -813,9 +892,41 @@
 static irqreturn_t wcd_mbhc_adc_hs_rem_irq(int irq, void *data)
 {
 	struct wcd_mbhc *mbhc = data;
+	unsigned long timeout;
+	int adc_threshold, output_mv, retry = 0;
 
 	pr_debug("%s: enter\n", __func__);
 	WCD_MBHC_RSC_LOCK(mbhc);
+
+	timeout = jiffies +
+		  msecs_to_jiffies(WCD_FAKE_REMOVAL_MIN_PERIOD_MS);
+	adc_threshold = ((WCD_MBHC_ADC_HS_THRESHOLD_MV *
+			  wcd_mbhc_get_micbias(mbhc)) /
+			  WCD_MBHC_ADC_MICBIAS_MV);
+	do {
+		retry++;
+		/*
+		 * read output_mv every 10ms to look for
+		 * any change in IN2_P
+		 */
+		usleep_range(10000, 10100);
+		output_mv = wcd_measure_adc_once(mbhc, MUX_CTL_IN2P);
+
+		pr_debug("%s: Check for fake removal: output_mv %d\n",
+			 __func__, output_mv);
+		if ((output_mv <= adc_threshold) &&
+		    retry > FAKE_REM_RETRY_ATTEMPTS) {
+			pr_debug("%s: headset is NOT actually removed\n",
+				 __func__);
+			goto exit;
+		}
+	} while (!time_after(jiffies, timeout));
+
+	if (wcd_swch_level_remove(mbhc)) {
+		pr_debug("%s: Switch level is low\n", __func__);
+		goto exit;
+	}
+
 	/*
 	 * ADC COMPLETE and ELEC_REM interrupts are both enabled for HEADPHONE,
 	 * need to reject the ADC COMPLETE interrupt which follows ELEC_REM one
@@ -827,6 +938,7 @@
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_MODE, 0);
 	WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_ADC_EN, 0);
 	wcd_mbhc_elec_hs_report_unplug(mbhc);
+exit:
 	WCD_MBHC_RSC_UNLOCK(mbhc);
 	pr_debug("%s: leave\n", __func__);
 	return IRQ_HANDLED;
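
Throughout the MBHC ADC rework, thresholds are rescaled by the currently
active micbias against the WCD_MBHC_ADC_MICBIAS_MV reference, so the same
ratio keeps working after micbias is raised to 2.7 V for special-headset
detection. A worked sketch with illustrative numbers (the real constants
live in wcd-mbhc-v2.h, not here):

	/* Illustrative values only. */
	static int example_scaled_threshold(int threshold_mv, int micbias_mv,
					    int ref_micbias_mv)
	{
		/* e.g. a 1700 mV threshold at a 1800 mV reference becomes
		 * (1700 * 2700) / 1800 = 2550 mV at 2.7 V micbias. */
		return (threshold_mv * micbias_mv) / ref_micbias_mv;
	}
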
diff --git a/sound/soc/codecs/wcd-mbhc-legacy.c b/sound/soc/codecs/wcd-mbhc-legacy.c
index ffba7f6..83023bc 100644
--- a/sound/soc/codecs/wcd-mbhc-legacy.c
+++ b/sound/soc/codecs/wcd-mbhc-legacy.c
@@ -445,7 +445,7 @@
 	bool wrk_complete = false;
 	int pt_gnd_mic_swap_cnt = 0;
 	int no_gnd_mic_swap_cnt = 0;
-	bool is_pa_on = false, spl_hs = false;
+	bool is_pa_on = false, spl_hs = false, spl_hs_reported = false;
 	bool micbias2 = false;
 	bool micbias1 = false;
 	int ret = 0;
@@ -652,6 +652,16 @@
 					plug_type);
 			if (!(plug_type == MBHC_PLUG_TYPE_GND_MIC_SWAP)) {
 				plug_type = MBHC_PLUG_TYPE_HEADSET;
+				if (!spl_hs_reported &&
+				    spl_hs_count == WCD_MBHC_SPL_HS_CNT) {
+					spl_hs_reported = true;
+					WCD_MBHC_RSC_LOCK(mbhc);
+					wcd_mbhc_find_plug_and_report(mbhc,
+								    plug_type);
+					WCD_MBHC_RSC_UNLOCK(mbhc);
+					continue;
+				} else if (spl_hs_reported)
+					continue;
 				/*
 				 * Report headset only if not already reported
 				 * and if there is not button press without
@@ -726,6 +736,29 @@
 	    !mbhc->micbias_enable)
 		mbhc->mbhc_cb->mbhc_micbias_control(codec, MIC_BIAS_2,
 						    MICB_DISABLE);
+
+	/*
+	 * If plug type is corrected from special headset to headphone,
+	 * clear the micbias enable flag, set micbias back to 1.8V and
+	 * disable micbias.
+	 */
+	if (plug_type == MBHC_PLUG_TYPE_HEADPHONE &&
+	    mbhc->micbias_enable) {
+		if (mbhc->mbhc_cb->mbhc_micbias_control)
+			mbhc->mbhc_cb->mbhc_micbias_control(
+					codec, MIC_BIAS_2,
+					MICB_DISABLE);
+		if (mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic)
+			mbhc->mbhc_cb->mbhc_micb_ctrl_thr_mic(
+					codec,
+					MIC_BIAS_2, false);
+		if (mbhc->mbhc_cb->set_micbias_value) {
+			mbhc->mbhc_cb->set_micbias_value(codec);
+			WCD_MBHC_REG_UPDATE_BITS(WCD_MBHC_MICB_CTRL, 0);
+		}
+		mbhc->micbias_enable = false;
+	}
+
 	if (mbhc->mbhc_cb->micbias_enable_status) {
 		micbias1 = mbhc->mbhc_cb->micbias_enable_status(mbhc,
 								MIC_BIAS_1);
diff --git a/sound/soc/codecs/wcd-mbhc-v2.h b/sound/soc/codecs/wcd-mbhc-v2.h
index dd3d35c..4ea4401 100644
--- a/sound/soc/codecs/wcd-mbhc-v2.h
+++ b/sound/soc/codecs/wcd-mbhc-v2.h
@@ -150,7 +150,7 @@
 
 #define WCD_MBHC_BTN_PRESS_COMPL_TIMEOUT_MS  50
 #define ANC_DETECT_RETRY_CNT 7
-#define WCD_MBHC_SPL_HS_CNT  2
+#define WCD_MBHC_SPL_HS_CNT  1
 
 enum wcd_mbhc_detect_logic {
 	WCD_DETECTION_LEGACY,
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index b5b1934..bef8a45 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -448,7 +448,7 @@
 			if (bc->set_params != SKL_PARAM_INIT)
 				continue;
 
-			mconfig->formats_config.caps = (u32 *)&bc->params;
+			mconfig->formats_config.caps = (u32 *)bc->params;
 			mconfig->formats_config.caps_size = bc->size;
 
 			break;
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index bc5f7e5..45c5479 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -351,7 +351,7 @@
 static int msm8996_set_spk(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 
 	pr_debug("%s() ucontrol->value.integer.value[0] = %ld\n",
 		 __func__, ucontrol->value.integer.value[0]);
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index a29786d..4d28a9d 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -870,7 +870,8 @@
 			continue;
 
 		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
-		kvm->buses[bus_idx]->ioeventfd_count--;
+		if (kvm->buses[bus_idx])
+			kvm->buses[bus_idx]->ioeventfd_count--;
 		ioeventfd_release(p);
 		ret = 0;
 		break;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 7f9ee29..f4c6d4f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -720,8 +720,11 @@
 	list_del(&kvm->vm_list);
 	spin_unlock(&kvm_lock);
 	kvm_free_irq_routing(kvm);
-	for (i = 0; i < KVM_NR_BUSES; i++)
-		kvm_io_bus_destroy(kvm->buses[i]);
+	for (i = 0; i < KVM_NR_BUSES; i++) {
+		if (kvm->buses[i])
+			kvm_io_bus_destroy(kvm->buses[i]);
+		kvm->buses[i] = NULL;
+	}
 	kvm_coalesced_mmio_free(kvm);
 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
 	mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
@@ -3463,6 +3466,8 @@
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 	r = __kvm_io_bus_write(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
@@ -3480,6 +3485,8 @@
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 
 	/* First try the device referenced by cookie. */
 	if ((cookie >= 0) && (cookie < bus->dev_count) &&
@@ -3530,6 +3537,8 @@
 	};
 
 	bus = srcu_dereference(vcpu->kvm->buses[bus_idx], &vcpu->kvm->srcu);
+	if (!bus)
+		return -ENOMEM;
 	r = __kvm_io_bus_read(vcpu, bus, &range, val);
 	return r < 0 ? r : 0;
 }
@@ -3542,6 +3551,9 @@
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
+	if (!bus)
+		return -ENOMEM;
+
 	/* exclude ioeventfd which is limited by maximum fd */
 	if (bus->dev_count - bus->ioeventfd_count > NR_IOBUS_DEVS - 1)
 		return -ENOSPC;
@@ -3561,37 +3573,41 @@
 }
 
 /* Caller must hold slots_lock. */
-int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
-			      struct kvm_io_device *dev)
+void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
+			       struct kvm_io_device *dev)
 {
-	int i, r;
+	int i;
 	struct kvm_io_bus *new_bus, *bus;
 
 	bus = kvm->buses[bus_idx];
-	r = -ENOENT;
+	if (!bus)
+		return;
+
 	for (i = 0; i < bus->dev_count; i++)
 		if (bus->range[i].dev == dev) {
-			r = 0;
 			break;
 		}
 
-	if (r)
-		return r;
+	if (i == bus->dev_count)
+		return;
 
 	new_bus = kmalloc(sizeof(*bus) + ((bus->dev_count - 1) *
 			  sizeof(struct kvm_io_range)), GFP_KERNEL);
-	if (!new_bus)
-		return -ENOMEM;
+	if (!new_bus) {
+		pr_err("kvm: failed to shrink bus, removing it completely\n");
+		goto broken;
+	}
 
 	memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
 	new_bus->dev_count--;
 	memcpy(new_bus->range + i, bus->range + i + 1,
 	       (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
 
+broken:
 	rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
 	synchronize_srcu_expedited(&kvm->srcu);
 	kfree(bus);
-	return r;
+	return;
 }
 
 struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
@@ -3604,6 +3620,8 @@
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 
 	bus = srcu_dereference(kvm->buses[bus_idx], &kvm->srcu);
+	if (!bus)
+		goto out_unlock;
 
 	dev_idx = kvm_io_bus_get_first_dev(bus, addr, 1);
 	if (dev_idx < 0)