Merge "Documentation: Add DT bindings for camera sensor modules" into msm-4.9
diff --git a/Documentation/crypto/msm/msm_ice_driver.txt b/Documentation/crypto/msm/msm_ice_driver.txt
new file mode 100644
index 0000000..ddb8176
--- /dev/null
+++ b/Documentation/crypto/msm/msm_ice_driver.txt
@@ -0,0 +1,235 @@
+Introduction:
+=============
+Storage encryption has long been one of the most requested security
+features. The QTI-based storage encryption solution uses a general purpose
+crypto engine. While this kind of solution provides decent performance, it
+falls short as storage speeds keep improving significantly. To overcome this
+performance gap, newer chips embed an Inline Crypto Engine (ICE) into the
+storage device. ICE is designed to meet the line speed of storage devices.
+
+Hardware Description
+====================
+ICE is a HW block that is embedded into storage devices such as UFS/eMMC. By
+default, ICE works in bypass mode i.e. ICE HW does not perform any crypto
+operation on data to be processed by the storage device. If required, ICE can
+be configured to perform crypto operations in one direction (i.e. either
+encryption or decryption) or in both directions (both encryption & decryption).
+
+When a switch between operation modes (plain to crypto or crypto to plain)
+is desired for a particular partition, SW must complete all transactions for
+that particular partition before switching the crypto mode, i.e. no crypto,
+one-direction crypto or both-direction crypto operation. Requests for other
+partitions are not impacted by the crypto mode switch.
+
+ICE HW currently supports the AES 128/256-bit ECB & XTS mode encryption
+algorithms.
+
+Keys for crypto operations are loaded from SW. Keys are stored in a lookup
+table (LUT) located inside the ICE HW. A maximum of 32 keys can be loaded in
+the ICE key LUT. A key inside the LUT can be referenced using a key index.
+
+SW Description
+==============
+ICE HW registers are categorized into 2 groups: those which can be accessed
+only by the secure side i.e. TZ, and those which can be accessed by the
+non-secure side, such as HLOS, as well. This requires the ICE driver to be
+split in two pieces: one running in TZ space and another in HLOS space.
+
+The ICE driver in TZ configures keys as requested by the HLOS side.
+
+The ICE driver on the HLOS side is responsible for initializing the ICE HW.
+
+SW Architecture Diagram
+=======================
+Following are all the components involved in the ICE driver for control path:
+
++++++++++++++++++++++++++++++++++++++++++
++               App layer               +
++++++++++++++++++++++++++++++++++++++++++
++             System layer              +
++   ++++++++         +++++++            +
++   + VOLD +         + PFM +            +
++   ++++++++         +++++++            +
++         ||         ||                 +
++         ||         ||                 +
++         \/         \/                 +
++        ++++++++++++++                 +
++        + LibQSEECom +                 +
++        ++++++++++++++                 +
++++++++++++++++++++++++++++++++++++++++++
++             Kernel                    +       +++++++++++++++++
++                                       +       +     KMS       +
++  +++++++  +++++++++++  +++++++++++    +       +++++++++++++++++
++  + ICE +  + Storage +  + QSEECom +    +       +   ICE Driver  +
++++++++++++++++++++++++++++++++++++++++++ <===> +++++++++++++++++
+               ||                                    ||
+               ||                                    ||
+               \/                                    \/
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
++                      Storage Device                           +
++                      ++++++++++++++                           +
++                      +   ICE HW   +                           +
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
+
+Use Cases:
+----------
+a) Device bootup
+ICE HW is detected during bootup and the corresponding probe function is
+called. The ICE driver parses its data from the device tree node. ICE HW and
+storage HW are tightly coupled; storage device probing depends on ICE device
+probing. The ICE driver configures all the required registers to put the
+ICE HW in bypass mode.
+
+b) Configuring keys
+Currently, there are a couple of use cases for configuring the keys.
+
+1) Full Disk Encryption (FDE)
+The system layer (VOLD), on invocation from the apps layer, calls libqseecom
+to create the encryption key. Libqseecom calls the qseecom driver to
+communicate with the KMS module on the secure side i.e. TZ. KMS calls the
+ICE driver on the TZ side to create and set the keys in the ICE HW. At the
+end of the transaction, VOLD holds the key LUT index where the encryption
+key is stored.
+
+2) Per File Encryption (PFE)
+The Per File Manager (PFM) calls the QSEECom API to create the key. PFM has
+a peer component (PFT) at the kernel layer which gets the corresponding key
+index from PFM.
+
+Following are all the components involved in the ICE driver for data path:
+
++++++++++++++++++++++++++++++++++++++++++
++               App layer               +
++++++++++++++++++++++++++++++++++++++++++
++              VFS                      +
++---------------------------------------+
++         File System (EXT4)            +
++---------------------------------------+
++             Block Layer               +
++ --------------------------------------+
++                              +++++++  +
++              dm-req-crypt => + PFT +  +
++                              +++++++  +
++                                       +
++---------------------------------------+
++    +++++++++++           +++++++      +
++    + Storage +           + ICE +      +
++++++++++++++++++++++++++++++++++++++++++
++                  ||                   +
++                  || (Storage Req with +
++                  \/  ICE parameters ) +
++++++++++++++++++++++++++++++++++++++++++
++          Storage Device               +
++          ++++++++++++++               +
++          +   ICE HW   +               +
++++++++++++++++++++++++++++++++++++++++++
+
+c) Data transaction
+Once the crypto key has been configured, VOLD/PFM creates a device mapping
+for the data partition. As part of device mapping, VOLD passes the key index,
+crypto algorithm, mode and key length to the dm layer. In the case of PFE,
+keys are provided by PFT as and when a request is processed by dm-req-crypt.
+When any application needs to read/write data, the request goes through the
+DM layer, which adds the crypto information, provided by VOLD/PFT, to the
+Request. For each Request, the storage driver asks the ICE driver to
+configure the crypto part of the request. The ICE driver extracts the crypto
+data from the Request structure and provides it to the storage driver, which
+finally dispatches the request to the storage device.
+
+d) Error Handling
+Due to issue #1 mentioned in "Known Issues", the ICE driver does not register
+for any interrupt. However, it enables the sources of interrupts for the ICE
+HW. After each data transaction, the storage driver receives a transaction
+completion event. As part of event handling, the storage driver calls the
+ICE driver to check if any of the ICE interrupt status bits are set. If yes,
+the storage driver returns an error to the upper layer.
+
+Error handling will change in future chips.
+
+Interfaces
+==========
+The ICE driver exposes interfaces for the storage driver to:
+1. Get the global instance of ICE driver
+2. Get the implemented interfaces of the particular ice instance
+3. Initialize the ICE HW
+4. Reset the ICE HW
+5. Resume/Suspend the ICE HW
+6. Get the Crypto configuration for the data request for storage
+7. Check if current data transaction has generated any interrupt
+
+Driver Parameters
+=================
+This driver is built and statically linked into the kernel; therefore,
+there are no module parameters supported by this driver.
+
+There are no kernel command line parameters supported by this driver.
+
+Power Management
+================
+The ICE driver does not do power management on its own, as ICE is part of
+the storage hardware. Whenever the storage driver receives a request for
+power collapse/suspend or resume, it calls the ICE driver, which exposes
+APIs for the storage HW. During power collapse or reset, the ICE HW wipes
+its crypto configuration data. When the ICE driver receives a request to
+resume, it asks the ICE driver on the TZ side to restore the configuration.
+The ICE driver does not do anything as part of a power collapse or suspend
+event.
+
+Interface:
+==========
+ICE driver exposes following APIs for storage driver to use:
+
+int (*init)(struct platform_device *, void *, ice_success_cb, ice_error_cb);
+	-- This function is invoked by the storage controller during its
+	initialization. The storage controller provides success and error
+	callbacks which are invoked asynchronously once ICE HW init is done.
+
+int (*reset)(struct platform_device *);
+	-- ICE HW reset as part of storage controller reset. When the storage
+	controller receives a reset command, it calls reset on the ICE HW. As
+	of now, ICE HW does not need to do anything as part of reset.
+
+int (*resume)(struct platform_device *);
+	-- While going through reset, ICE HW wipes all crypto keys and other
+	data from the ICE HW. The ICE driver reconfigures that data as part
+	of the resume operation.
+
+int (*suspend)(struct platform_device *);
+	-- This API is called by the storage driver when the storage device is
+	going into suspend mode. As of today, the ICE driver does nothing to
+	handle suspend.
+
+int (*config)(struct platform_device *, struct request *, struct ice_data_setting *);
+	-- The storage driver calls this interface to get all the crypto data
+	required to perform a crypto operation.
+
+int (*status)(struct platform_device *);
+	-- The storage driver calls this interface to check if the previous
+	data transfer generated any error.
+
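+A minimal sketch of how a storage driver might invoke these ops. The
+ops-table type, global handles and function names below are illustrative
+assumptions, not definitions from this tree:
+
+	/* Hypothetical glue in a storage driver; all names are assumptions. */
+	struct ice_ops {
+		int (*config)(struct platform_device *, struct request *,
+			      struct ice_data_setting *);
+		int (*status)(struct platform_device *);
+	};
+
+	static struct ice_ops *ice_vops;          /* ops table from ICE driver */
+	static struct platform_device *ice_pdev;  /* global ICE instance */
+
+	static int storage_prepare_ice(struct request *req)
+	{
+		struct ice_data_setting setting;
+		int ret;
+
+		/* Ask ICE for the crypto parameters of this request. */
+		ret = ice_vops->config(ice_pdev, req, &setting);
+		if (ret)
+			return ret;
+		/* Program the returned settings into the command descriptor. */
+		return 0;
+	}
+
+	static void storage_complete_ice(void)
+	{
+		/* Poll ICE status after each transfer; see "Error Handling". */
+		if (ice_vops->status(ice_pdev))
+			pr_err("ICE reported an error for the last transfer\n");
+	}
+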
+Config options
+==============
+This driver is enabled by the kernel config option CONFIG_CRYPTO_DEV_MSM_ICE.
+
+Dependencies
+============
+ICE driver depends upon corresponding ICE driver on TZ side to function
+appropriately.
+
+Known Issues
+============
+1. ICE HW emits 0s even if it has generated an interrupt
+This issue has a significant impact on how ICE interrupts are handled.
+Currently, the ICE driver does not register for any of the ICE interrupts
+but enables the sources of interrupts. Once the storage driver asks to check
+the status of interrupts, the ICE driver reads and clears the interrupt
+status and provides the read status to the storage driver. This mechanism,
+though not optimal, prevents filesystem corruption.
+This issue has been fixed in newer chips.
+
+2. ICE HW wipes all crypto data during power collapse
+This issue necessitates that the ICE driver on the TZ side store the crypto
+material, which is not required in the case of a general purpose crypto
+engine.
+This issue has been fixed in newer chips.
+
+Further Improvements
+====================
+Currently, due to the PFE use case, the ICE driver depends on dm-req-crypt
+to provide the keys as part of the request structure. This couples the ICE
+driver with the dm-req-crypt based solution. It is under discussion to
+expose IOCTL-based and registration-based interface APIs from the ICE
+driver. The ICE driver would use these two interfaces to find out if any key
+exists for the current request. If yes, it chooses the right key index
+received from the IOCTL or registration based APIs. If not, it does not set
+any crypto parameters in the request.
diff --git a/Documentation/crypto/msm/qce.txt b/Documentation/crypto/msm/qce.txt
new file mode 100644
index 0000000..9f1b313b
--- /dev/null
+++ b/Documentation/crypto/msm/qce.txt
@@ -0,0 +1,228 @@
+Introduction:
+=============
+
+The QTI crypto engine (qce) driver is a module that
+provides common services for accessing the QTI crypto device.
+Currently, the two main clients of qce are
+-qcrypto driver (module provided for accessing CE HW by kernel space apps)
+-qcedev driver (module provided for accessing CE HW by user space apps)
+
+
+The crypto engine (qce) driver is a client to the DMA driver for the QTI
+DMA device - Application Data Mover (ADM). ADM is used to provide the DMA
+transfer capability between QTI crypto device hardware and DDR memory
+for crypto operations.
+
+  Figure 1.
+  ---------
+
+  Linux kernel
+  (ex:IPSec)<-----*  QTI crypto driver----+
+			(qcrypto)	  |
+		   (for kernel space app) |
+					  |
+					  +-->|
+					      |
+					      | *qce   <----> QTI
+					      | driver        ADM driver <---> ADM HW
+					  +-->|			|		|
+					  |			|		|
+					  |			|		|
+					  |			|		|
+   Linux kernel				  |			|		|
+   misc device  <--- *QCEDEV Driver-------+			|		|
+   interface             (qcedev) 			(Reg interface)	 (DMA interface)
+			(for user space app)			\		/
+								 \	       /
+								  \	      /
+								   \	     /
+								    \	    /
+								     \	   /
+								      \	  /
+								QTI crypto CE3 HW
+
+
+ The entities marked with (*) in Figure 1 are the software components of
+ the Linux QTI crypto modules.
+
+===============
+IMPORTANT NOTE:
+===============
+(1) The CE hardware can be accessed either from user space OR kernel space
+    at any one time. User space and kernel space clients cannot access the
+    qce driver (and the CE hardware) at the same time.
+	- If your device has user space apps that need to access the crypto
+	  hardware, make sure to have the qcrypto module disabled/unloaded.
+	  This will result in kernel space apps using the registered
+	  software implementation of the crypto algorithms.
+	- If your device has kernel space apps that need to access the
+	  crypto hardware, make sure to have the qcedev module disabled/unloaded
+	  and implement your user space application to use the software
+	  implementation (ex: openssl/crypto) of the crypto algorithms.
+
+(2) If your device has the Playready (Windows Media DRM) application enabled
+    and uses the qcedev module to access the crypto hardware accelerator,
+    please be informed that, for performance reasons, the CE hardware will
+    need to be dedicated to the Playready application.  Any other user space
+    application should be implemented to use the SW implementation
+    (ex: openssl/crypto) of the crypto algorithms.
+
+
+Hardware description:
+=====================
+
+QTI Crypto HW device family provides a series of algorithms implemented
+in the device hardware.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 HW provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm, and Over The Air (OTA) f8/f9 algorithms as
+defined by the 3GPP forum.
+
+
+Software description
+====================
+
+The crypto device is defined as a platform device. The driver is
+independent of the platform. The driver supports multiple instances of
+crypto HW.
+All the platform specific parameters are defined in the board init
+file, eg. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+The qce driver provides the common services of HW crypto
+access to the two drivers listed above (qcedev, qcrypto). It sets up
+the crypto HW device for the operation, then it requests the ADM driver for
+the DMA of the crypto operation.
+
+Two ADM channels and two command lists (one command list for each
+channel) are involved in an operation.
+
+The setting up of the command lists and the procedure of the operation
+of the crypto device are described in the following sections.
+
+The command list for the first DMA channel is set up as follows:
+
+  1st command of the list is for the DMA transfer from DDR memory to the
+  crypto device to input data to crypto device. The dst crci of the command
+  is set for crci-in for this crypto device.
+
+  2nd command is for the DMA transfer from the crypto device to DDR memory
+  for the authentication result. The src crci is set as crci-hash-done of the
+  crypto device. If authentication is not required in the operation,
+  the 2nd command is not used.
+
+The command list for the second DMA channel is set up as follows:
+
+  One command to DMA data from crypto device to DDR memory for encryption or
+  decryption output from crypto device.
+
+To accomplish concurrent ciphering and authentication operations, the driver
+performs the following steps:
+    (a) set up the HW crypto device,
+    (b) hit the crypto go register,
+    (c) issue the DMA command of the first channel to the ADM driver,
+    (d) issue the DMA command of the 2nd channel to the ADM driver.
+
+SHA1/SHA256 is an authentication/integrity hash algorithm. To accomplish a
+hash operation (or any authentication-only algorithm), the 2nd DMA channel
+is not required. Only steps (a) to (c) are performed.
+
+At the completion of the DMA operation (for (c) and (d)), the ADM driver
+invokes the callback registered with the DMA driver. This signifies the end
+of the DMA operation(s). The driver reads the status and other information
+from the CE hardware registers and then invokes the callback to the qce
+driver client. This signals the completion and the results of the DMA, along
+with the status of the CE hardware, to the qce driver client. This completes
+a crypto operation.
+
+In the qce driver initialization, memory for the two command lists and the
+descriptor lists for each crypto device is allocated out of coherent memory,
+using the Linux DMA API. The driver pre-configures most of the two ADM
+command lists at initialization. During each crypto operation, minimal setup
+is required. The src_dscr and/or dst_dscr descriptor lists of the ADM command
+are populated from the information obtained from the corresponding data
+structure, e.g. for an AEAD request, the following data structure provides
+the information:
+
+    struct aead_request *req
+      ......
+    req->assoc
+    req->src
+    req->dst
+
+The DMA address of a scatter list will be retrieved and set up in the
+descriptor list of an ADM command.
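+
+As a sketch of that step using the standard kernel DMA API (the ADM
+descriptor programming itself is hardware-specific and only hinted at in the
+comments):
+
+	#include <linux/dma-mapping.h>
+	#include <linux/scatterlist.h>
+
+	static void setup_src_dscr(struct device *dev, struct scatterlist *src)
+	{
+		struct scatterlist *sg;
+		int i, nents;
+
+		/* Map the request's source scatterlist for device access. */
+		nents = dma_map_sg(dev, src, sg_nents(src), DMA_TO_DEVICE);
+		for_each_sg(src, sg, nents, i) {
+			dma_addr_t addr = sg_dma_address(sg);
+			unsigned int len = sg_dma_len(sg);
+
+			/* program addr/len into an ADM src_dscr entry here */
+			(void)addr;
+			(void)len;
+		}
+	}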
+
+Power Management
+================
+  none
+
+
+Interface:
+==========
+
+The interface is defined in qce.h.
+
+The qcrypto and qcedev drivers are the clients using
+these interfaces.
+
+The following services are provided by the qce driver -
+
+     qce_open(), qce_close(), qce_ablk_cipher_req(),
+     qce_hw_support(), qce_process_sha_req()
+
+  qce_open() is the first request from the client, e.g. the QTI crypto
+  drivers (qcedev, qcrypto), to open a crypto engine. It is normally
+  called from the probe function of the client for a device. During the
+  probe,
+  - the ADM command list structure will be set up,
+  - the crypto device will be initialized,
+  - resources associated with the crypto engine are retrieved by doing
+    platform_get_resource() or platform_get_resource_byname().
+
+ The resources for a device are
+    - crci-in, crci-out, crci-hash-done
+    - two DMA channel IDs, one for encryption and decryption input, one for
+      output.
+    - base address of the HW crypto device.
+
+  qce_close() is the last request from the client. Normally, it is
+  called from the remove function of the client.
+
+  qce_hw_support() allows the client to query what is supported
+  by the crypto engine hardware.
+
+  qce_ablk_cipher_req() provides the ciphering service to the client.
+  qce_process_sha_req() provides the hashing service to the client.
+  qce_aead_req() provides the AEAD service to the client.
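+
+  A minimal sketch of a client's probe/remove path built on these services.
+  The exact qce.h signatures are assumptions here; qce_open() is assumed to
+  return an opaque handle and report errors through an int pointer:
+
+	/* Hypothetical qce client; signatures below are assumptions. */
+	void *qce_open(struct platform_device *pdev, int *rc);
+	int qce_close(void *handle);
+
+	static void *qce_handle;
+
+	static int my_client_probe(struct platform_device *pdev)
+	{
+		int rc = 0;
+
+		qce_handle = qce_open(pdev, &rc);  /* set up CE + ADM lists */
+		if (!qce_handle)
+			return rc ? rc : -ENODEV;
+		return 0;
+	}
+
+	static int my_client_remove(struct platform_device *pdev)
+	{
+		qce_close(qce_handle);             /* release the engine */
+		return 0;
+	}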
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ in each platform.
+
+
+Dependencies:
+=============
+
+Existing DMA driver.
+The transfers are DMA'ed between the crypto hardware and DDR memory via the
+data mover, ADM. The data transfers are set up to use the existing DMA driver.
+
+User space utilities:
+=====================
+  n/a
+
+Known issues:
+=============
+  n/a
+
+To do:
+======
+  n/a
diff --git a/Documentation/crypto/msm/qcedev.txt b/Documentation/crypto/msm/qcedev.txt
new file mode 100644
index 0000000..0638dd9
--- /dev/null
+++ b/Documentation/crypto/msm/qcedev.txt
@@ -0,0 +1,231 @@
+Introduction:
+=============
+
+This driver provides IOCTLs for user space applications to access the crypto
+engine hardware for the qcedev crypto services. The driver supports the
+following crypto algorithms:
+- AES-128, AES-256 (ECB, CBC and CTR mode)
+- AES-192, (ECB, CBC and CTR mode)
+  (support exists on platform supporting CE 3.x hardware)
+- SHA1/SHA256
+- AES-128, AES-256 (XTS), AES CMAC, SHA1/SHA256 HMAC
+  (support exists on platform supporting CE 4.x hardware)
+
+Hardware description:
+=====================
+Crypto 3E provides cipher and hash algorithms as defined in the
+3GPP forum specifications.
+
+
+Software description
+====================
+
+The driver is a Linux platform device driver. For an msm target,
+there can be multiple crypto devices assigned for QCEDEV.
+
+The driver is a misc device driver as well.
+The following operations are registered in the driver,
+-qcedev_ioctl()
+-qcedev_open()
+-qcedev_release()
+
+The following IOCTLS are available to the user space application(s)-
+
+  Cipher IOCTLs:
+  --------------
+    QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+    QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+  Hashing/HMAC IOCTLs
+  -------------------
+
+    QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+    QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+    QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+    QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+	packet of known size.
+    QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+	algorithm) for data packet of known size.
+
+The requests are synchronous. The driver will put the process to
+sleep, waiting for the completion of the request using wait_for_completion().
+
+Since the requests come from user space applications, before handing
+the requests to the low level qce driver, the ioctl requests and the
+associated input/output buffers have to be safety checked and copied
+to/from kernel space.
+
+The extra copying of requests/buffer can affect the performance. The issue
+with copying the data buffer is resolved by having the client use PMEM
+allocated buffers.
+
+NOTE:  Using memory allocated via PMEM is supported only for in place
+       operations where source and destination buffers point to the same
+       location. Different source and destination buffers are not
+       supported currently.
+       Furthermore, when using PMEM in AES CTR mode, a non-zero byteoffset
+       is not supported for encryption or decryption requests.
+
+The design of the driver is to allow multiple opens, and multiple requests
+to be issued from application(s). Therefore, the driver will internally queue
+the requests, and serialize the requests to the low level qce (or qce40) driver.
+
+On an IOCTL request from an application, if there is no outstanding
+request, the driver will issue a "qce" request; otherwise,
+the request is queued in the driver queue.  The process is suspended
+waiting for completion.
+
+On completion of a request by the low level qce driver, the internal
+tasklet (done_tasklet) is scheduled. The sole purpose of done_tasklet is
+to call the completion of the current active request (complete()), and
+issue more requests to the qce, if any.
+When the process wakes up from wait_for_completion(), it will collect the
+return code and return from the ioctl.
+
+A spin lock is used to protect the internal queue, a critical section that
+can be accessed from multiple tasks, on SMP, and from the completion
+callback from qce.
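+
+The queue/serialize pattern described above, as a rough sketch (all names
+here are illustrative, not the actual qcedev internals):
+
+	#include <linux/completion.h>
+	#include <linux/list.h>
+	#include <linux/spinlock.h>
+
+	struct my_req {
+		struct list_head list;
+		struct completion done;
+		int err;
+	};
+
+	struct my_dev {
+		spinlock_t lock;
+		struct list_head queue;
+		struct my_req *active;       /* currently issued to qce */
+	};
+
+	void issue_next_to_qce(struct my_dev *pdev); /* hands head to qce */
+
+	static int submit_req(struct my_dev *pdev, struct my_req *req)
+	{
+		unsigned long flags;
+		bool idle;
+
+		spin_lock_irqsave(&pdev->lock, flags);
+		list_add_tail(&req->list, &pdev->queue);
+		idle = !pdev->active;        /* no outstanding request? */
+		spin_unlock_irqrestore(&pdev->lock, flags);
+
+		if (idle)
+			issue_next_to_qce(pdev);
+		/* caller sleeps; done_tasklet completes req->done later */
+		wait_for_completion(&req->done);
+		return req->err;
+	}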
+
+The driver maintains a set of statistics using debug fs. The files are
+in /debug/qcedev/stats1, /debug/qcedev/stats2, /debug/qcedev/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+
+Power Management
+================
+n/a
+
+
+Interface:
+==========
+
+Linux user space applications need to open a handle
+(file descriptor) to the qcedev device. This is achieved by doing
+the following:
+
+     fd = open("/dev/qce", O_RDWR);
+     ..
+     ioctl(fd, ...);
+
+Once a valid fd is retrieved, the user can call the following ioctls with
+the fd as the first parameter and a pointer to an appropriate data
+structure, qcedev_cipher_op_req or qcedev_sha_op_req (depending on
+cipher/hash functionality), as the second parameter.
+
+The following IOCTLS are available to the user space application(s)-
+
+  Cipher IOCTLs:
+  --------------
+    QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+    QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+	The caller of the IOCTL passes a pointer to the structure shown
+	below, as the second parameter.
+
+	struct	qcedev_cipher_op_req {
+		int				use_pmem;
+		union{
+			struct qcedev_pmem_info pmem;
+			struct qcedev_vbuf_info vbuf;
+		};
+		uint32_t			entries;
+		uint32_t			data_len;
+		uint8_t				in_place_op;
+		uint8_t				enckey[QCEDEV_MAX_KEY_SIZE];
+		uint32_t			encklen;
+		uint8_t				iv[QCEDEV_MAX_IV_SIZE];
+		uint32_t			ivlen;
+		uint32_t			byteoffset;
+		enum qcedev_cipher_alg_enum	alg;
+		enum qcedev_cipher_mode_enum	mode;
+		enum qcedev_oper_enum		op;
+	};
+
+  Hashing/HMAC IOCTLs
+  -------------------
+
+    QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+    QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+    QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+    QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for data
+	packet of known size.
+    QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using AES CMAC
+	algorithm) for data packet of known size.
+
+	The caller of the IOCTL passes a pointer to the structure shown
+	below, as the second parameter.
+
+	struct	qcedev_sha_op_req {
+		struct buf_info			data[QCEDEV_MAX_BUFFERS];
+		uint32_t			entries;
+		uint32_t			data_len;
+		uint8_t				digest[QCEDEV_MAX_SHA_DIGEST];
+		uint32_t			diglen;
+		uint8_t				*authkey;
+		uint32_t			authklen;
+		enum qcedev_sha_alg_enum	alg;
+		struct qcedev_sha_ctxt		ctxt;
+	};
+
+The IOCTLs and associated request data structures are defined in qcedev.h
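+
+As a sketch, a one-shot in-place AES-128-CBC encryption over virtual buffers
+might look as follows. The enum values (QCEDEV_ALG_AES, QCEDEV_AES_MODE_CBC,
+QCEDEV_OPER_ENC) and the vbuf src field layout are assumptions based on
+common qcedev.h definitions, not verified against this tree:
+
+	#include <fcntl.h>
+	#include <stdint.h>
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <unistd.h>
+	#include "qcedev.h"
+
+	/* len is assumed to be a multiple of the AES block size (16). */
+	int encrypt_buf(uint8_t *buf, uint32_t len,
+			const uint8_t key[16], const uint8_t iv[16])
+	{
+		struct qcedev_cipher_op_req req;
+		int fd, ret;
+
+		fd = open("/dev/qce", O_RDWR);
+		if (fd < 0)
+			return -1;
+
+		memset(&req, 0, sizeof(req));
+		req.use_pmem = 0;            /* virtual buffers (vbuf) */
+		req.in_place_op = 1;         /* src doubles as dst (assumed) */
+		req.entries = 1;
+		req.data_len = len;
+		req.vbuf.src[0].vaddr = buf; /* assumed vbuf field layout */
+		req.vbuf.src[0].len = len;
+		memcpy(req.enckey, key, 16);
+		req.encklen = 16;
+		memcpy(req.iv, iv, 16);
+		req.ivlen = 16;
+		req.alg = QCEDEV_ALG_AES;
+		req.mode = QCEDEV_AES_MODE_CBC;
+		req.op = QCEDEV_OPER_ENC;
+
+		ret = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
+		close(fd);
+		return ret;
+	}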
+
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file:
+- CE hardware base register address
+- Data mover channel used for transfer to/from CE hardware
+These parameters differ on each platform.
+
+
+
+Dependencies:
+=============
+qce driver. Please see Documentation/crypto/msm/qce.txt.
+
+
+User space utilities:
+=====================
+
+none
+
+Known issues:
+=============
+
+none.
+
+
+To do:
+======
+  Enhance cipher functionality:
+  (1) Add support for handling > 32KB in the ciphering functionality when the
+      operation is not an "in place" operation (source != destination),
+      when using PMEM allocated memory.
+
+Limitations:
+============
+  (1) For cipher functionality, the driver does not support
+      a combination of different memory sources for source/destination.
+      In other words, the memory pointed to by src and dst
+      must BOTH (src/dst) be "pmem" or BOTH (src/dst) be "vbuf".
+
+  (2) For hash functionality, the driver does not support handling data
+      buffers allocated via PMEM.
+
+  (3) Do not load this driver if your device already has kernel space apps
+      that need to access the crypto hardware.
+      Make sure to have qcedev module disabled/unloaded and implement your user
+      space application to use the software implementation (ex: openssl/crypto)
+      of the crypto algorithms.
+      (NOTE:  Please refer to details on the limitations listed in qce.txt)
+
+  (4) If your device has the Playready (Windows Media DRM) application
+      enabled and uses the qcedev module to access the crypto hardware
+      accelerator, please be informed that, for performance reasons, the CE
+      hardware will need to be dedicated to the Playready application. Any
+      other user space application should be implemented to use the software
+      implementation (ex: openssl/crypto) of the crypto algorithms.
diff --git a/Documentation/crypto/msm/qcrypto.txt b/Documentation/crypto/msm/qcrypto.txt
new file mode 100644
index 0000000..2503103
--- /dev/null
+++ b/Documentation/crypto/msm/qcrypto.txt
@@ -0,0 +1,142 @@
+Introduction:
+=============
+
+QTI Crypto (qcrypto) driver is a Linux crypto driver which interfaces
+with the Linux kernel crypto API layer to provide the HW crypto functions.
+This driver is accessed by kernel space apps via the kernel crypto API layer.
+At present there is no means for user space apps to access this module.
+
+Hardware description:
+=====================
+
+QTI Crypto HW device family provides a series of algorithms implemented
+in the device.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES, AES
+algorithms, and concurrent operations of hashing, and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 provides fast
+AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+HMAC-SHA1 hashing algorithm.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 4.0 provides
+HMAC-SHA1/SHA256, AES CBC-MAC hashing algorithm and AES XTS/CCM cipher
+algorithms.
+
+
+Software description
+====================
+
+The module init function (_qcrypto_init()) calls platform_driver_register()
+to register the driver. As a result, the driver probe function,
+_qcrypto_probe(), will be invoked for each registered device.
+
+In the probe function, the driver opens the low level CE (qce_open), and
+registers the supported algorithms with the kernel crypto API layer.
+Currently, qcrypto supports the following algorithms:
+
+      ablkcipher -
+          cbc(aes),ecb(aes),ctr(aes)
+      ahash -
+          sha1, sha256
+      aead -
+          authenc(hmac(sha1),cbc(aes))
+
+      The hmac(sha1), hmac(sha256), authenc(hmac(sha1),cbc(aes)), ccm(aes)
+      and xts(aes) algorithms are registered for some platforms that
+      support them in the CE hardware.
+
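+As a sketch, a kernel client can exercise one of these registered algorithms
+through the generic kernel crypto API (this is the standard 4.9-era skcipher
+API, not a qcrypto-specific interface; whether the HW or a SW fallback
+services the request depends on algorithm priorities):
+
+	#include <crypto/skcipher.h>
+	#include <linux/completion.h>
+	#include <linux/scatterlist.h>
+
+	struct demo_result {
+		struct completion done;
+		int err;
+	};
+
+	static void demo_cb(struct crypto_async_request *areq, int err)
+	{
+		struct demo_result *res = areq->data;
+
+		if (err == -EINPROGRESS)
+			return;
+		res->err = err;
+		complete(&res->done);
+	}
+
+	/* len is assumed to be a multiple of the AES block size (16). */
+	static int demo_cbc_aes(u8 *buf, unsigned int len,
+				const u8 key[16], u8 iv[16])
+	{
+		struct crypto_skcipher *tfm;
+		struct skcipher_request *req;
+		struct scatterlist sg;
+		struct demo_result res;
+		int ret;
+
+		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
+		if (IS_ERR(tfm))
+			return PTR_ERR(tfm);
+		ret = crypto_skcipher_setkey(tfm, key, 16);
+		if (ret)
+			goto free_tfm;
+		req = skcipher_request_alloc(tfm, GFP_KERNEL);
+		if (!req) {
+			ret = -ENOMEM;
+			goto free_tfm;
+		}
+		init_completion(&res.done);
+		sg_init_one(&sg, buf, len);
+		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+					      demo_cb, &res);
+		skcipher_request_set_crypt(req, &sg, &sg, len, iv);
+		ret = crypto_skcipher_encrypt(req);
+		if (ret == -EINPROGRESS || ret == -EBUSY) {
+			wait_for_completion(&res.done);
+			ret = res.err;
+		}
+		skcipher_request_free(req);
+	free_tfm:
+		crypto_free_skcipher(tfm);
+		return ret;
+	}
+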
+The HW device can support various algorithms. However, the most important
+algorithms for gaining performance using a HW crypto accelerator are
+AEAD and ABLKCIPHER.
+
+AEAD stands for "authenticated encryption with associated data".
+ABLKCIPHER stands for "asynchronous block cipher".
+
+The AEAD structure is described in the header file aead.h.
+
+The design of the driver is to allow multiple requests to be
+issued from kernel client SW (e.g. IPSec).
+Therefore, the driver has to internally queue the requests, and
+serialize the requests to the low level qce driver.
+
+When a request is received from the client, if there is no outstanding
+request, a qce (or qce40) request is issued, otherwise, the request is
+queued in the driver queue.
+
+On completion of a request, the qce (or qce40) invokes the callback
+registered by qcrypto.  The internal tasklet (done_tasklet) is scheduled
+in this callback function. The sole purpose of done_tasklet is
+to call the completion of the current active request, and
+issue more requests to the qce (or qce40), if any exist.
+
+A spin lock is used to protect the internal queue, a critical section that
+can be accessed from multiple tasks, on SMP, and from the completion
+callback from qce.
+
+The driver maintains a set of statistics using debug fs. The files are
+in /debug/qcrypto/stats1, /debug/qcrypto/stats2, /debug/qcrypto/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+Test vectors for the authenc(hmac(sha1),cbc(aes)) algorithm are
+developed offline and imported to crypto/testmgr.c and crypto/testmgr.h.
+
+
+Power Management
+================
+  none
+
+
+Interface:
+==========
+The kernel interface is defined in crypto.h.
+
+
+Module parameters:
+==================
+
+All the platform specific parameters are defined in the board init
+file, e.g. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+Dependencies:
+=============
+qce driver.
+
+
+User space utilities:
+=====================
+  n/a
+
+Known issues:
+=============
+  n/a
+
+To do:
+======
+  Add Hashing algorithms.
+
+
+Limitations:
+===============
+(1) Each packet transfer size (for cipher and hash) is limited to a maximum
+    of 32KB.  This is a limitation in the crypto engine hardware. Clients
+    will have to break packets larger than 32KB into multiple requests of
+    smaller data packets.
+
+(2) Do not load this driver if your device has user space apps that need to
+    access the crypto hardware. Please make sure to have the qcrypto module
+    disabled/unloaded.
+    Not having the driver loaded will result in kernel space apps using
+    the registered software implementation of the crypto algorithms.
+
+(3) If your device has the Playready application enabled and uses the qcedev
+    module to access the crypto hardware accelerator, please be informed
+    that, for performance reasons, the CE hardware will need to be dedicated
+    to the Playready application.  Any other user space or kernel application
+    should be implemented to use the software implementation of the crypto
+    algorithms.
+
+    (NOTE:  Please refer to details on the limitations listed in qce/40.txt)
diff --git a/Documentation/devicetree/bindings/arm/msm/tz-log.txt b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
new file mode 100644
index 0000000..d7e84a3
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/tz-log.txt
@@ -0,0 +1,24 @@
+* TZLOG (Trust Zone Log)
+
+The tz_log driver is a platform device driver that exposes a debugfs
+interface for accessing and displaying diagnostic information
+related to secure code (Trustzone/QSEE).
+
+Required properties:
+- compatible : Should be "qcom,tz-log"
+- reg        : Offset and size of the register set for the device
+
+Optional properties:
+- qcom,hyplog-enabled   : (boolean) indicates if the driver supports the HYP logger service.
+- hyplog-address-offset : Register offset to get the HYP log base address.
+- hyplog-size-offset    : Register offset to get the HYP log size parameter.
+
+Example:
+
+	qcom,tz-log@fe805720 {
+		compatible = "qcom,tz-log";
+		reg = <0xfe805720 0x1000>;
+		qcom,hyplog-enabled;
+		hyplog-address-offset = <0x410>;
+		hyplog-size-offset = <0x414>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/msm/ice.txt b/Documentation/devicetree/bindings/crypto/msm/ice.txt
new file mode 100644
index 0000000..2d0e580
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/ice.txt
@@ -0,0 +1,32 @@
+* Inline Crypto Engine (ICE)
+
+Required properties:
+  - compatible : should be "qcom,ice"
+  - reg : <register mapping>
+
+Optional properties:
+  - interrupt-names     : name describing the interrupts for ICE IRQ
+  - interrupts          : <interrupt mapping for ICE IRQ>
+  - qcom,enable-ice-clk : should enable clocks for ICE HW
+  - clocks              : List of phandle and clock specifier pairs
+  - clock-names         : List of clock input name strings sorted in the same
+                          order as the clocks property.
+  - qcom,op-freq-hz     : max clock speed sorted in the same order as the clocks
+                          property.
+  - qcom,instance-type  : describes the storage type for which the ICE node is
+                          defined. Currently, only "ufs" and "sdcc" are
+                          supported.
+
+Example:
+        ufs_ice: ufsice@630000 {
+                compatible = "qcom,ice";
+                reg = <0x630000 0x8000>;
+                interrupt-names = "ufs_ice_nonsec_level_irq", "ufs_ice_sec_level_irq";
+                interrupts = <0 258 0>, <0 257 0>;
+                qcom,enable-ice-clk;
+                clock-names = "ice_core_clk_src", "ice_core_clk";
+                clocks = <&clock_gcc clk_ufs_ice_core_clk_src>,
+                         <&clock_gcc clk_gcc_ufs_ice_core_clk>;
+                qcom,op-freq-hz = <300000000>, <0>;
+                qcom,instance-type = "ufs";
+                status = "disabled";
+        };
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcedev.txt b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
new file mode 100644
index 0000000..c8077cb
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcedev.txt
@@ -0,0 +1,43 @@
+* QCEDEV (QTI Crypto Engine Device)
+
+Required properties:
+  - compatible : should be "qcom,qcedev"
+  - reg : should contain crypto, BAM register map.
+  - reg-names : should contain the crypto and bam base register names.
+  - interrupts : should contain crypto BAM interrupt.
+  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm_bus,name: Should be "qcedev-noc"
+  - qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
+  - qcom,msm_bus,active-only: Boolean flag for context of request (actve/dual)
+  - qcom,msm_bus,num_paths: The paths for source and destination ports
+  - qcom,msm_bus,vectors: Vectors for bus topology.
+  - qcom,ce-device: Device number.
+  - qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+
+Optional properties:
+  - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EE.
+  - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+  - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+  - qcom,bsm-ee : optional, indicate the BAM EE value, changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+	qcom,qcedev@fd440000 {
+		compatible = "qcom,qcedev";
+		reg = <0xfd440000 0x20000>,
+			<0xfd444000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 235 0>;
+		qcom,bam-pipe-pair = <0>;
+		qcom,ce-hw-instance = <1>;
+		qcom,ce-device = <0>;
+		qcom,ce-hw-shared;
+		qcom,msm-bus,name = "qcedev-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
+		qcom,ce-opp-freq = <100000000>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcota.txt b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
new file mode 100644
index 0000000..3ce63af
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcota.txt
@@ -0,0 +1,42 @@
+* QCOTA (Over The Air Crypto Device)
+
+Required properties:
+  - compatible : should be "qcom,qcota"
+  - reg : should contain crypto, BAM register map.
+  - reg-names : should contain the crypto and bam base register names.
+  - interrupts : should contain crypto BAM interrupt.
+  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,ce-device: Unique QCOTA device identifier. 0 for first
+			instance, 1 for second instance, n-1 for n-th instance.
+  - qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+
+Optional properties:
+  - qcom,support-core-clk-only : optional, indicates if the HW supports single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value, changes from target to target. Default value is 1 if not specified.
+
+Example:
+
+	qcom,qcota@fe140000 {
+		compatible = "qcom,qcota";
+		reg = <0xfe140000 0x20000>,
+			<0xfe144000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 111 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <2>;
+		qcom,ce-device = <0>;
+		qcom,ce-opp-freq = <100000000>;
+	};
+
+	qcom,qcota@fe0c0000 {
+		compatible = "qcom,qcota";
+		reg = <0xfe0c0000 0x20000>,
+			<0xfe0c4000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 113 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <4>;
+		qcom,ce-device = <1>;
+		qcom,ce-opp-freq = <100000000>;
+	};
diff --git a/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
new file mode 100644
index 0000000..06b219a
--- /dev/null
+++ b/Documentation/devicetree/bindings/crypto/msm/qcrypto.txt
@@ -0,0 +1,61 @@
+* QCRYPTO (QTI Crypto)
+
+Required properties:
+  - compatible : should be "qcom,qcrypto"
+  - reg : should contain crypto, BAM register map.
+  - reg-names : should contain the crypto and bam base register names.
+  - interrupts : should contain crypto BAM interrupt.
+  - qcom,bam-pipe-pair : should contain crypto BAM pipe pair index.
+  - qcom,ce-hw-instance : should contain crypto HW instance.
+  - qcom,msm_bus,name: Should be "qcrypto-noc"
+  - qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
+  - qcom,msm_bus,active-only: Boolean flag for context of request (actve/dual)
+  - qcom,msm_bus,num_paths: The paths for source and destination ports
+  - qcom,ce-device: Device number. The device number is encoded as follows:
+		bits 3-0  device type:	0 for full disk encryption (fde)
+					1 for per file encryption (pfe)
+		bits 7-4  unit number within the device type.
+		For example, <0x10> denotes unit 1 of the fde device type.
+
+
+Optional properties:
+  - qcom,ce-hw-shared : optional, indicates if the hardware is shared between EE.
+  - qcom,ce-hw-key : optional, indicates if the hardware supports use of HW KEY.
+  - qcom,use-sw-aes-cbc-ecb-ctr-algo : optional, indicates whether to use the SW aes-cbc/ecb/ctr algorithm.
+  - qcom,use-sw-aes-xts-algo : optional, indicates whether to use the SW aes-xts algorithm.
+  - qcom,use-sw-aead-algo : optional, indicates whether to use the SW aead algorithm.
+  - qcom,use-sw-ahash-algo : optional, indicates whether to use the SW hash algorithm.
+  - qcom,use-sw-hmac-algo : optional, indicates whether to use the SW hmac algorithm.
+  - qcom,use-sw-aes-ccm-algo : optional, indicates whether to use the SW aes-ccm algorithm.
+  - qcom,clk-mgmt-sus-res : optional, indicates whether the ce clocks need to be disabled/enabled on suspend/resume.
+  - qcom,support-core-clk-only : optional, indicates if the HW supports a single crypto core clk.
+  - qcom,bsm-ee : optional, indicates the BAM EE value, changes from target to target. Default value is 1 if not specified.
+
+  - qcom,ce-opp-freq: optional, indicates the CE operating frequency in Hz,
+	changes from target to target. If not specified, by default the
+	frequency is set to 100MHz.
+
+  - qcom,msm-bus,vectors-KBps: optional, indicates vectors for bus topology.
+	This attribute is required for msm targets where bus scaling is
+	needed; other targets, such as fsm, do not perform bus scaling and
+	do not require it.
+
+Example:
+
+        qcom,qcrypto@fd444000 {
+		compatible = "qcom,qcrypto";
+		reg = <0xfd440000 0x20000>,
+		      <0xfd444000 0x8000>;
+		reg-names = "crypto-base","crypto-bam-base";
+		interrupts = <0 235 0>;
+		qcom,bam-pipe-pair = <1>;
+		qcom,ce-hw-instance = <1>;
+		qcom,ce-device = <0>;
+		qcom,ce-hw-shared;
+		qcom,msm-bus,name = "qcrypto-noc";
+		qcom,msm-bus,num-cases = <2>;
+		qcom,msm-bus,num-paths = <1>;
+		qcom,msm-bus,vectors-KBps =
+				<56 512 0 0>,
+				<56 512 3936000 393600>;
+		qcom,ce-opp-freq = <100000000>;
+	};
diff --git a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
index 6111c88..0db9970 100644
--- a/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
+++ b/Documentation/devicetree/bindings/mmc/sdhci-msm.txt
@@ -44,6 +44,9 @@
 	- gpios - specifies gpios assigned for sdhc slot.
 	- qcom,gpio-names -  a list of strings that map in order to the list of gpios
 
+	TLMM pins are specified as <clk cmd data> and, starting with eMMC 5.0,
+	as <clk cmd data rclk>.
+
 Example:
 
 	aliases {
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
index 9638888..addb0a6 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/qpnp-fg-gen3.txt
@@ -154,6 +154,20 @@
 		    asleep and the battery is discharging. This option requires
 		    qcom,fg-esr-timer-awake to be defined.
 
+- qcom,fg-esr-pulse-thresh-ma
+	Usage:      optional
+	Value type: <u32>
+	Definition: ESR pulse qualification threshold in mA. If this is not
+		    specified, a default value of 110 mA will be configured.
+		    Allowed values are from 1 to 997.
+
+- qcom,fg-esr-meas-curr-ma
+	Usage:      optional
+	Value type: <u32>
+	Definition: ESR measurement current in mA. If this is not specified,
+		    a default value of 120 mA will be configured. Allowed
+		    values are 60, 120, 180 and 240.
+
 - qcom,cycle-counter-en
 	Usage:      optional
 	Value type: <empty>
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
index c8f2a5a..92ef23c 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb138x-charger.txt
@@ -31,6 +31,12 @@
 	revid module. This is used to identify
 	the SMB subtype.
 
+- qcom,parallel-mode
+  Usage:      optional
+  Value type: <u32>
+  Definition: Specifies parallel charging mode. If not specified, MID-MID
+	      option is selected by default.
+
 - qcom,suspend-input
   Usage:      optional
   Value type: <empty>
diff --git a/Documentation/devicetree/bindings/qseecom/qseecom.txt b/Documentation/devicetree/bindings/qseecom/qseecom.txt
new file mode 100644
index 0000000..8fbf8e2
--- /dev/null
+++ b/Documentation/devicetree/bindings/qseecom/qseecom.txt
@@ -0,0 +1,85 @@
+* QSEECOM (QTI Secure Execution Environment Communicator)
+
+Required properties:
+- compatible : Should be "qcom,qseecom"
+- reg : should contain memory region address reserved for loading secure apps.
+- qcom,disk-encrypt-pipe-pair : indicates what CE HW pipe pair is used for disk encryption
+- qcom,file-encrypt-pipe-pair : indicates what CE HW pipe pair is used for file encryption
+- qcom,support-multiple-ce-hw-instance : indicates if multiple CE HW instances are supported.
+- qcom,hlos-num-ce-hw-instances : indicates the number of CE HW instances HLOS can use.
+- qcom,hlos-ce-hw-instance : indicates what CE HW is used by the HLOS crypto driver
+- qcom,qsee-ce-hw-instance : indicates what CE HW is used by the secure domain (TZ) crypto driver
+- qcom,msm_bus,name: Should be "qseecom-noc"
+- qcom,msm_bus,num_cases: Depends on the use cases for bus scaling
+- qcom,msm_bus,num_paths: The paths for source and destination ports
+- qcom,msm_bus,vectors: Vectors for bus topology.
+- qcom,ce-opp-freq: indicates the CE operating frequency in Hz, changes from target to target.
+- qcom,full-disk-encrypt-info : Vectors defining full disk encryption unit, crypto engine, pipe pair configuration in <unit#, ce#, pipe-pair#>
+- qcom,per-file-encrypt-info : Vectors defining per file encryption unit, crypto engine, pipe pair configuration in <unit#, ce#, pipe-pair#>
+
+Optional properties:
+  - qcom,support-bus-scaling : indicates if the driver supports scaling the bus for crypto operations.
+  - qcom,support-fde : indicates if the driver supports key management for the full disk encryption feature.
+  - qcom,support-pfe : indicates if the driver supports key management for the per file encryption feature.
+  - qcom,no-clock-support : indicates clocks are not handled by qseecom (they could be handled by RPM)
+  - qcom,appsbl-qseecom-support : indicates if there is qseecom support in the apps bootloader
+  - vdd-hba-supply   : handle for a fixed power regulator
+  - qcom,qsee-reentrancy-support: indicates the qsee reentrancy phase supported by the target
+  - qcom,commonlib64-loaded-by-uefi: indicates commonlib64 is loaded by UEFI already
+  - qcom,fde-key-size: indicates which FDE key size is used on the device.
+
+Example:
+	qcom,qseecom@fe806000 {
+		compatible = "qcom,qseecom";
+		reg = <0x7f00000 0x500000>;
+		reg-names = "secapp-region";
+		qcom,disk-encrypt-pipe-pair = <2>;
+		qcom,file-encrypt-pipe-pair = <0>;
+		qcom,support-multiple-ce-hw-instance;
+		qcom,hlos-num-ce-hw-instances = <2>;
+		qcom,hlos-ce-hw-instance = <1 2>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,support-fde;
+		qcom,support-pfe;
+		qcom,msm_bus,name = "qseecom-noc";
+		qcom,msm_bus,num_cases = <4>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,no-clock-support;
+		qcom,appsbl-qseecom-support;
+		qcom,fde-key-size;
+		qcom,msm_bus,vectors =
+			<55 512 0 0>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>;
+		qcom,ce-opp-freq = <100000000>;
+		vdd-hba-supply = <&gdsc_ufs>;
+	};
+
+Example: The following dts setup is the same as the example above.
+
+	qcom,qseecom@fe806000 {
+		compatible = "qcom,qseecom";
+		reg = <0x7f00000 0x500000>;
+		reg-names = "secapp-region";
+		qcom,support-fde;
+		qcom,full-disk-encrypt-info = <0 1 2>, <0 2 2>;
+		qcom,support-pfe;
+		qcom,per-file-encrypt-info = <0 1 0>, <0 2 0>;
+		qcom,qsee-ce-hw-instance = <0>;
+		qcom,msm_bus,name = "qseecom-noc";
+		qcom,msm_bus,num_cases = <4>;
+		qcom,msm_bus,active_only = <0>;
+		qcom,msm_bus,num_paths = <1>;
+		qcom,no-clock-support;
+		qcom,appsbl-qseecom-support;
+		qcom,fde-key-size;
+		qcom,msm_bus,vectors =
+			<55 512 0 0>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>,
+			<55 512 3936000000 393600000>;
+		qcom,ce-opp-freq = <100000000>;
+		vdd-hba-supply = <&gdsc_ufs>;
+	};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
index 38d7d38..3f0cf77 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-cdp.dtsi
@@ -128,3 +128,28 @@
 &pmi8998_charger {
 	qcom,batteryless-platform;
 };
+
+/ {
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+	};
+};
+
+&qupv3_se9_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "ok";
+};
+
+&qupv3_se3_i2c {
+	status = "ok";
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
index a6efb50..3f2317a 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-gpu.dtsi
@@ -256,6 +256,9 @@
 				"axi_clk", "memnoc_clk";
 
 		qcom,gmu-pwrlevels {
+			#address-cells = <1>;
+			#size-cells = <0>;
+
 			compatible = "qcom,gmu-pwrlevels";
 
 			qcom,gmu-pwrlevel@0 {
diff --git a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
index e1eacb19..ca89f38 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-mtp.dtsi
@@ -122,3 +122,28 @@
 &pmi8998_fg {
 	qcom,battery-data = <&mtp_batterydata>;
 };
+
+/ {
+	aliases {
+		serial0 = &qupv3_se9_2uart;
+		spi0 = &qupv3_se8_spi;
+		i2c0 = &qupv3_se10_i2c;
+		i2c1 = &qupv3_se3_i2c;
+	};
+};
+
+&qupv3_se9_2uart {
+	status = "ok";
+};
+
+&qupv3_se8_spi {
+	status = "ok";
+};
+
+&qupv3_se3_i2c {
+	status = "ok";
+};
+
+&qupv3_se10_i2c {
+	status = "ok";
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
index 0f31c0a..be41858 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dts
@@ -16,7 +16,6 @@
 
 #include "sdm845.dtsi"
 #include "sdm845-rumi.dtsi"
-#include "sdm845-usb.dtsi"
 / {
 	model = "Qualcomm Technologies, Inc. SDM845 RUMI";
 	compatible = "qcom,sdm845-rumi", "qcom,sdm845", "qcom,rumi";
@@ -28,32 +27,3 @@
 		status = "disabled";
 	};
 };
-
-&usb0 {
-	/delete-property/ qcom,usb-dbm;
-	qcom,charging-disabled;
-	dwc3@a600000 {
-		maximum-speed = "high-speed";
-	};
-};
-
-&qusb_phy0 {
-	reg = <0x088e2000 0x4>,
-	      <0x0a720000 0x9500>;
-	reg-names = "qusb_phy_base",
-		"emu_phy_base";
-	qcom,emulation;
-	qcom,emu-init-seq = <0x19 0x1404
-			     0x20 0x1414
-			     0x79 0x1410
-			     0x00 0x1418
-			     0x99 0x1404
-			     0x04 0x1408
-			     0xd9 0x1404>;
-
-	qcom,emu-dcm-reset-seq = <0x5 0x14	/* 0x1 0x14 for E1.2 */
-				  0x100000 0x20
-				  0x0 0x20
-				  0x1a0 0x20	/* 0x220 0x20 for E1.2 */
-				  0x80 0x28>;
-};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
index 5625531..3ec83f5 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-rumi.dtsi
@@ -151,3 +151,33 @@
 &pmi8998_charger {
 	qcom,suspend-input;
 };
+
+&usb0 {
+	/delete-property/ qcom,usb-dbm;
+	extcon = <0>, <0>, <&eud>;
+	qcom,charging-disabled;
+	dwc3@a600000 {
+		maximum-speed = "high-speed";
+	};
+};
+
+&qusb_phy0 {
+	reg = <0x088e2000 0x4>,
+	      <0x0a720000 0x9500>;
+	reg-names = "qusb_phy_base",
+		"emu_phy_base";
+	qcom,emulation;
+	qcom,emu-init-seq = <0x19 0x1404
+			     0x20 0x1414
+			     0x79 0x1410
+			     0x00 0x1418
+			     0x99 0x1404
+			     0x04 0x1408
+			     0xd9 0x1404>;
+
+	qcom,emu-dcm-reset-seq = <0x5 0x14	/* 0x1 0x14 for E1.2 */
+				  0x100000 0x20
+				  0x0 0x20
+				  0x1a0 0x20	/* 0x220 0x20 for E1.2 */
+				  0x80 0x28>;
+};
diff --git a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
index d99e6de..3e00577 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-sde.dtsi
@@ -18,16 +18,22 @@
 		reg-names = "mdp_phys",
 			"vbif_phys";
 
-		clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
+		clocks =
+			<&clock_gcc GCC_DISP_AHB_CLK>,
+			<&clock_gcc GCC_DISP_AXI_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_AHB_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_AXI_CLK>,
 			<&clock_dispcc DISP_CC_MDSS_MDP_CLK_SRC>,
-			<&clock_dispcc DISP_CC_MDSS_MDP_CLK>;
-		clock-names = "iface_clk", "bus_clk",
-			"core_clk_src", "core_clk";
-		clock-rate = <0 0 300000000 300000000>;
-		clock-max-rate = <0 0 430000000 430000000>;
+			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK_SRC>,
+			<&clock_dispcc DISP_CC_MDSS_MDP_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_VSYNC_CLK>;
+		clock-names = "gcc_iface", "gcc_bus",
+			"iface_clk", "bus_clk", "core_clk_src",
+			"vsync_clk_src", "core_clk", "vsync_clk";
+		clock-rate = <0 0 0 0 300000000 0 300000000 0 0>;
+		clock-max-rate = <0 0 0 0 430000000 0 430000000 0 0>;
 
-		mdp-vdd-supply = <&mdss_core_gdsc>;
+		sde-vdd-supply = <&mdss_core_gdsc>;
 
 		/* interrupt config */
 		interrupt-parent = <&intc>;
@@ -142,7 +148,7 @@
 
 			qcom,platform-supply-entry@0 {
 				reg = <0>;
-				qcom,supply-name = "mdp-vdd";
+				qcom,supply-name = "sde-vdd";
 				qcom,supply-min-voltage = <0>;
 				qcom,supply-max-voltage = <0>;
 				qcom,supply-enable-load = <0>;
@@ -184,6 +190,11 @@
 		qcom,sde-rsc-version = <1>;
 
 		vdd-supply = <&mdss_core_gdsc>;
+		clocks = <&clock_dispcc DISP_CC_MDSS_RSCC_AHB_CLK>,
+			<&clock_dispcc DISP_CC_MDSS_RSCC_VSYNC_CLK>;
+		clock-names = "iface_clk", "vsync_clk";
+		clock-rate = <0 0>;
+
 		qcom,sde-dram-channels = <2>;
 
 		/* data and reg bus scale settings */
diff --git a/arch/arm64/boot/dts/qcom/sdm845.dtsi b/arch/arm64/boot/dts/qcom/sdm845.dtsi
index 5a41ab7..9c2f81f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845.dtsi
@@ -832,17 +832,17 @@
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
 			<   499200000 0x5014021a 0x00002020 0x1 3 >,
 			<   576000000 0x5014031e 0x00002020 0x1 4 >,
-			<   652800000 0x501c0422 0x00002020 0x1 5 >,
-			<   729600000 0x501c0526 0x00002020 0x1 6 >,
-			<   806400000 0x501c062a 0x00002222 0x1 7 >;
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >;
 
 		qcom,pwrcl-speedbin0-v0 =
 			<   300000000 0x000c000f 0x00002020 0x1 1 >,
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
 			<   499200000 0x5014021a 0x00002020 0x1 3 >,
 			<   576000000 0x5014031e 0x00002020 0x1 4 >,
-			<   652800000 0x501c0422 0x00002020 0x1 5 >,
-			<   748800000 0x501c0527 0x00002020 0x1 6 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   748800000 0x401c0527 0x00002020 0x1 6 >,
 			<   825600000 0x401c062b 0x00002222 0x1 7 >,
 			<   902400000 0x4024072f 0x00002626 0x1 8 >,
 			<   979200000 0x40240833 0x00002929 0x1 9 >,
@@ -855,9 +855,9 @@
 			<   422400000 0x50140116 0x00002020 0x1 2 >,
 			<   499200000 0x5014021a 0x00002020 0x1 3 >,
 			<   576000000 0x5014031e 0x00002020 0x1 4 >,
-			<   652800000 0x501c0422 0x00002020 0x1 5 >,
-			<   729600000 0x501c0526 0x00002020 0x1 6 >,
-			<   806400000 0x501c062a 0x00002222 0x1 7 >,
+			<   652800000 0x401c0422 0x00002020 0x1 5 >,
+			<   729600000 0x401c0526 0x00002020 0x1 6 >,
+			<   806400000 0x401c062a 0x00002222 0x1 7 >,
 			<   883200000 0x4024072b 0x00002525 0x1 8 >,
 			<   960000000 0x40240832 0x00002828 0x1 9 >,
 			<  1036800000 0x40240936 0x00002b2b 0x1 10 >,
@@ -2206,3 +2206,4 @@
 #include "sdm845-pinctrl.dtsi"
 #include "sdm845-audio.dtsi"
 #include "sdm845-gpu.dtsi"
+#include "sdm845-usb.dtsi"
diff --git a/drivers/char/hw_random/msm-rng.c b/drivers/char/hw_random/msm-rng.c
index 96fb986..18cd3e9 100644
--- a/drivers/char/hw_random/msm-rng.c
+++ b/drivers/char/hw_random/msm-rng.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2013, 2017, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,7 @@
 	rng->hwrng.init = msm_rng_init,
 	rng->hwrng.cleanup = msm_rng_cleanup,
 	rng->hwrng.read = msm_rng_read,
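+	/*
+	 * A rough, driver-supplied estimate: the hwrng core treats
+	 * quality as entropy per 1024 bits of output, so 700 claims
+	 * roughly 68% entropy. It only scales entropy crediting; it
+	 * does not gate the hardware.
+	 */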
+	rng->hwrng.quality = 700;
 
 	ret = devm_hwrng_register(&pdev->dev, &rng->hwrng);
 	if (ret) {
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index d5e2be6..a358dd0 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -224,6 +224,7 @@
 	u32 osm_clk_rate;
 	u32 xo_clk_rate;
 	bool secure_init;
+	bool per_core_dcvs;
 	bool red_fsm_en;
 	bool boost_fsm_en;
 	bool safe_fsm_en;
@@ -449,6 +450,17 @@
 	return 0;
 }
 
+static long cpu_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long *parent_rate)
+{
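+	/*
+	 * CPU leaf clocks carry no rate table of their own; delegate
+	 * rounding to the OSM parent so callers only see rates the OSM
+	 * lookup table supports.
+	 */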
+	struct clk_hw *parent_hw = clk_hw_get_parent(hw);
+
+	if (!parent_hw)
+		return -EINVAL;
+
+	return clk_hw_round_rate(parent_hw, rate);
+}
+
 static unsigned long cpu_clk_recalc_rate(struct clk_hw *hw,
 					unsigned long parent_rate)
 {
@@ -1536,8 +1548,16 @@
 	parent = to_clk_osm(clk_hw_get_parent(&c->hw));
 
 	spin_lock_irqsave(&parent->lock, flags);
-	val = clk_osm_read_reg_no_log(parent,
+	/*
+	 * Use core 0's copy as proxy for the whole cluster when per
+	 * core DCVS is disabled.
+	 */
+	if (parent->per_core_dcvs)
+		val = clk_osm_read_reg_no_log(parent,
 			OSM_CYCLE_COUNTER_STATUS_REG(c->core_num));
+	else
+		val = clk_osm_read_reg_no_log(parent,
+			OSM_CYCLE_COUNTER_STATUS_REG(0));
 
 	if (val < c->prev_cycle_counter) {
 		/* Handle counter overflow */
@@ -2503,8 +2523,10 @@
 	clk_osm_misc_programming(&pwrcl_clk);
 	clk_osm_misc_programming(&perfcl_clk);
 
-	if (of_property_read_bool(pdev->dev.of_node,
-				"qcom,enable-per-core-dcvs")) {
+	pwrcl_clk.per_core_dcvs = perfcl_clk.per_core_dcvs =
+			of_property_read_bool(pdev->dev.of_node,
+				"qcom,enable-per-core-dcvs");
+	if (pwrcl_clk.per_core_dcvs) {
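+		/* BIT(0) of CORE_DCVS_CTRL acts as the per-core DCVS enable */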
 		val = clk_osm_read_reg(&pwrcl_clk, CORE_DCVS_CTRL);
 		val |= BIT(0);
 		clk_osm_write_reg(&pwrcl_clk, val, CORE_DCVS_CTRL);
@@ -2516,6 +2538,7 @@
 
 	clk_ops_core = clk_dummy_ops;
 	clk_ops_core.set_rate = cpu_clk_set_rate;
+	clk_ops_core.round_rate = cpu_clk_round_rate;
 	clk_ops_core.recalc_rate = cpu_clk_recalc_rate;
 
 	spin_lock_init(&l3_clk.lock);
diff --git a/drivers/clk/qcom/debugcc-sdm845.c b/drivers/clk/qcom/debugcc-sdm845.c
index d74db61..d30675c 100644
--- a/drivers/clk/qcom/debugcc-sdm845.c
+++ b/drivers/clk/qcom/debugcc-sdm845.c
@@ -113,6 +113,10 @@
 	"disp_cc_mdss_spdm_pclk1_clk",
 	"disp_cc_mdss_spdm_rot_clk",
 	"disp_cc_mdss_vsync_clk",
+	"measure_only_snoc_clk",
+	"measure_only_cnoc_clk",
+	"measure_only_bimc_clk",
+	"measure_only_ipa_2x_clk",
 	"gcc_aggre_noc_pcie_tbu_clk",
 	"gcc_aggre_ufs_card_axi_clk",
 	"gcc_aggre_ufs_phy_axi_clk",
@@ -444,6 +448,14 @@
 			0x1C, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
 		{ "disp_cc_mdss_vsync_clk", 0x47, 4, DISP_CC,
 			0x6, 0xFF, 0, 0x3, 0, 1, 0x6000, 0x6008, 0x600C },
+		{ "measure_only_snoc_clk", 0x7, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "measure_only_cnoc_clk", 0x15, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "measure_only_bimc_clk", 0xc2, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
+		{ "measure_only_ipa_2x_clk", 0x128, 4, GCC,
+			0x7, 0x3FFF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_aggre_noc_pcie_tbu_clk", 0x2D, 4, GCC,
 			0x2D, 0x3FF, 0, 0xF, 0, 4, 0x62008, 0x62000, 0x62004 },
 		{ "gcc_aggre_ufs_card_axi_clk", 0x11E, 4, GCC,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index e02c103..2e8ef93 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -151,6 +151,38 @@
 	"core_bi_pll_test_se",
 };
 
+static struct clk_dummy measure_only_snoc_clk = {
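+	/*
+	 * Software-only placeholder: the snoc/cnoc/bimc/ipa_2x fabric
+	 * clocks below are not HLOS-controlled, but dummy clk_hw objects
+	 * give debugcc named handles to measure them via the debug mux.
+	 */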
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_snoc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy measure_only_cnoc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_cnoc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy measure_only_bimc_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_bimc_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
+static struct clk_dummy measure_only_ipa_2x_clk = {
+	.rrate = 1000,
+	.hw.init = &(struct clk_init_data){
+		.name = "measure_only_ipa_2x_clk",
+		.ops = &clk_dummy_ops,
+	},
+};
+
 static struct pll_vco fabia_vco[] = {
 	{ 250000000, 2000000000, 0 },
 	{ 125000000, 1000000000, 1 },
@@ -2052,32 +2084,6 @@
 	},
 };
 
-static struct clk_branch gcc_qupv3_wrap0_core_2x_clk = {
-	.halt_reg = 0x17014,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(9),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap0_core_2x_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_qupv3_wrap0_core_clk = {
-	.halt_reg = 0x1700c,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(8),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap0_core_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_qupv3_wrap0_s0_clk = {
 	.halt_reg = 0x17030,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -2222,32 +2228,6 @@
 	},
 };
 
-static struct clk_branch gcc_qupv3_wrap1_core_2x_clk = {
-	.halt_reg = 0x18004,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(18),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap1_core_2x_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
-static struct clk_branch gcc_qupv3_wrap1_core_clk = {
-	.halt_reg = 0x18008,
-	.halt_check = BRANCH_HALT_VOTED,
-	.clkr = {
-		.enable_reg = 0x5200c,
-		.enable_mask = BIT(19),
-		.hw.init = &(struct clk_init_data){
-			.name = "gcc_qupv3_wrap1_core_clk",
-			.ops = &clk_branch2_ops,
-		},
-	},
-};
-
 static struct clk_branch gcc_qupv3_wrap1_s0_clk = {
 	.halt_reg = 0x18014,
 	.halt_check = BRANCH_HALT_VOTED,
@@ -3147,6 +3127,13 @@
 	},
 };
 
+struct clk_hw *gcc_sdm845_hws[] = {
+	[MEASURE_ONLY_SNOC_CLK] = &measure_only_snoc_clk.hw,
+	[MEASURE_ONLY_CNOC_CLK] = &measure_only_cnoc_clk.hw,
+	[MEASURE_ONLY_BIMC_CLK] = &measure_only_bimc_clk.hw,
+	[MEASURE_ONLY_IPA_2X_CLK] = &measure_only_ipa_2x_clk.hw,
+};
+
 static struct clk_regmap *gcc_sdm845_clocks[] = {
 	[GCC_AGGRE_NOC_PCIE_TBU_CLK] = &gcc_aggre_noc_pcie_tbu_clk.clkr,
 	[GCC_AGGRE_UFS_CARD_AXI_CLK] = &gcc_aggre_ufs_card_axi_clk.clkr,
@@ -3219,8 +3206,6 @@
 	[GCC_QMIP_CAMERA_AHB_CLK] = &gcc_qmip_camera_ahb_clk.clkr,
 	[GCC_QMIP_DISP_AHB_CLK] = &gcc_qmip_disp_ahb_clk.clkr,
 	[GCC_QMIP_VIDEO_AHB_CLK] = &gcc_qmip_video_ahb_clk.clkr,
-	[GCC_QUPV3_WRAP0_CORE_2X_CLK] = &gcc_qupv3_wrap0_core_2x_clk.clkr,
-	[GCC_QUPV3_WRAP0_CORE_CLK] = &gcc_qupv3_wrap0_core_clk.clkr,
 	[GCC_QUPV3_WRAP0_S0_CLK] = &gcc_qupv3_wrap0_s0_clk.clkr,
 	[GCC_QUPV3_WRAP0_S0_CLK_SRC] = &gcc_qupv3_wrap0_s0_clk_src.clkr,
 	[GCC_QUPV3_WRAP0_S1_CLK] = &gcc_qupv3_wrap0_s1_clk.clkr,
@@ -3237,8 +3222,6 @@
 	[GCC_QUPV3_WRAP0_S6_CLK_SRC] = &gcc_qupv3_wrap0_s6_clk_src.clkr,
 	[GCC_QUPV3_WRAP0_S7_CLK] = &gcc_qupv3_wrap0_s7_clk.clkr,
 	[GCC_QUPV3_WRAP0_S7_CLK_SRC] = &gcc_qupv3_wrap0_s7_clk_src.clkr,
-	[GCC_QUPV3_WRAP1_CORE_2X_CLK] = &gcc_qupv3_wrap1_core_2x_clk.clkr,
-	[GCC_QUPV3_WRAP1_CORE_CLK] = &gcc_qupv3_wrap1_core_clk.clkr,
 	[GCC_QUPV3_WRAP1_S0_CLK] = &gcc_qupv3_wrap1_s0_clk.clkr,
 	[GCC_QUPV3_WRAP1_S0_CLK_SRC] = &gcc_qupv3_wrap1_s0_clk_src.clkr,
 	[GCC_QUPV3_WRAP1_S1_CLK] = &gcc_qupv3_wrap1_s1_clk.clkr,
@@ -3385,8 +3368,9 @@
 
 static int gcc_sdm845_probe(struct platform_device *pdev)
 {
+	struct clk *clk;
 	struct regmap *regmap;
-	int ret = 0;
+	int i, ret = 0;
 
 	regmap = qcom_cc_map(pdev, &gcc_sdm845_desc);
 	if (IS_ERR(regmap))
@@ -3416,6 +3400,13 @@
 		return PTR_ERR(vdd_cx_ao.regulator[0]);
 	}
 
+	/* Register the dummy measurement clocks */
+	for (i = 0; i < ARRAY_SIZE(gcc_sdm845_hws); i++) {
+		clk = devm_clk_register(&pdev->dev, gcc_sdm845_hws[i]);
+		if (IS_ERR(clk))
+			return PTR_ERR(clk);
+	}
+
 	ret = qcom_cc_really_probe(pdev, &gcc_sdm845_desc, regmap);
 	if (ret) {
 		dev_err(&pdev->dev, "Failed to register GCC clocks\n");
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 4d2b81f..fd02eba 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -385,6 +385,58 @@
 	  Select this to offload Samsung S5PV210 or S5PC110, Exynos from AES
 	  algorithms execution.
 
+config CRYPTO_DEV_QCE50
+        bool
+
+config FIPS_ENABLE
+        bool "FIPS140-2 compliant build"
+        default n
+        help
+          Select this to make the current build FIPS 140-2
+          compliant. It enables the code paths that implement
+          the required compliance behavior. Say Y here to
+          enable.
+
+config CRYPTO_DEV_QCRYPTO
+        tristate "QTI Crypto accelerator"
+        select CRYPTO_DES
+        select CRYPTO_ALGAPI
+        select CRYPTO_AUTHENC
+        select CRYPTO_BLKCIPHER
+        default n
+        help
+          This driver supports QTI crypto acceleration for
+          kernel clients. Say Y here to build it into the
+          kernel, or M to build it as a module; the module
+          will be called qcrypto.
+
+config CRYPTO_DEV_QCOM_MSM_QCE
+        tristate "QTI Crypto Engine (QCE) module"
+        default n
+        help
+          This driver supports QTI Crypto Engine 5.0.
+          To compile this driver as a module, choose M here: the
+          module is called qce50.
+
+config CRYPTO_DEV_QCEDEV
+        tristate "QCEDEV Interface to CE module"
+        default n
+        help
+          This driver supports QTI QCEDEV Crypto Engine 5.0.
+          This exposes the interface to the QCE hardware accelerator
+          via IOCTLs.
+
+          To compile this driver as a module, choose M here: the
+          module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+        tristate "OTA Crypto module"
+        help
+          This driver supports QTI OTA Crypto in the FSM9xxx.
+          Say Y here to build it into the kernel, or M to build
+          it as a module; the module will be called ota_crypto.
+
 config CRYPTO_DEV_NX
 	bool "Support for IBM PowerPC Nest (NX) cryptographic acceleration"
 	depends on PPC64
@@ -555,4 +607,8 @@
 
 source "drivers/crypto/chelsio/Kconfig"
 
+if ARCH_QCOM
+source "drivers/crypto/msm/Kconfig"
+endif # ARCH_QCOM
+
 endif # CRYPTO_HW
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile
index ad7250f..5f7b988 100644
--- a/drivers/crypto/Makefile
+++ b/drivers/crypto/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_CRYPTO_DEV_UX500) += ux500/
 obj-$(CONFIG_CRYPTO_DEV_QAT) += qat/
 obj-$(CONFIG_CRYPTO_DEV_QCE) += qce/
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += msm/
 obj-$(CONFIG_CRYPTO_DEV_VMX) += vmx/
 obj-$(CONFIG_CRYPTO_DEV_SUN4I_SS) += sunxi-ss/
 obj-$(CONFIG_CRYPTO_DEV_ROCKCHIP) += rockchip/
diff --git a/drivers/crypto/msm/Kconfig b/drivers/crypto/msm/Kconfig
new file mode 100644
index 0000000..0f4568b
--- /dev/null
+++ b/drivers/crypto/msm/Kconfig
@@ -0,0 +1,10 @@
+
+config CRYPTO_DEV_QCOM_ICE
+	tristate "Inline Crypto Module"
+	default n
+	depends on PFK && BLK_DEV_DM
+	help
+	  This driver supports Inline Crypto Engine for QTI chipsets, MSM8994
+	  and later, to accelerate crypto operations for storage needs.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ice.
diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile
new file mode 100644
index 0000000..9ecb646
--- /dev/null
+++ b/drivers/crypto/msm/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_CRYPTO_DEV_QCOM_MSM_QCE) += qce50.o
+obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o
+obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o
+obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o
+obj-$(CONFIG_CRYPTO_DEV_QCOM_ICE) += ice.o
diff --git a/drivers/crypto/msm/compat_qcedev.c b/drivers/crypto/msm/compat_qcedev.c
new file mode 100644
index 0000000..0ca28be
--- /dev/null
+++ b/drivers/crypto/msm/compat_qcedev.c
@@ -0,0 +1,431 @@
+/*
+ * QTI CE 32-bit compatibility syscall for 64-bit systems
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/qcedev.h>
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
+static int compat_get_qcedev_pmem_info(
+		struct compat_qcedev_pmem_info __user *pmem32,
+		struct qcedev_pmem_info __user *pmem)
+{
+	compat_ulong_t offset;
+	compat_int_t fd_src;
+	compat_int_t fd_dst;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	err |= get_user(fd_src, &pmem32->fd_src);
+	err |= put_user(fd_src, &pmem->fd_src);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem32->src[i].offset);
+		err |= put_user(offset, &pmem->src[i].offset);
+		err |= get_user(len, &pmem32->src[i].len);
+		err |= put_user(len, &pmem->src[i].len);
+	}
+
+	err |= get_user(fd_dst, &pmem32->fd_dst);
+	err |= put_user(fd_dst, &pmem->fd_dst);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem32->dst[i].offset);
+		err |= put_user(offset, &pmem->dst[i].offset);
+		err |= get_user(len, &pmem32->dst[i].len);
+		err |= put_user(len, &pmem->dst[i].len);
+	}
+
+	return err;
+}
+
+static int compat_put_qcedev_pmem_info(
+		struct compat_qcedev_pmem_info __user *pmem32,
+		struct qcedev_pmem_info __user *pmem)
+{
+	compat_ulong_t offset;
+	compat_int_t fd_src;
+	compat_int_t fd_dst;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	err |= get_user(fd_src, &pmem->fd_src);
+	err |= put_user(fd_src, &pmem32->fd_src);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem->src[i].offset);
+		err |= put_user(offset, &pmem32->src[i].offset);
+		err |= get_user(len, &pmem->src[i].len);
+		err |= put_user(len, &pmem32->src[i].len);
+	}
+
+	err |= get_user(fd_dst, &pmem->fd_dst);
+	err |= put_user(fd_dst, &pmem32->fd_dst);
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(offset, &pmem->dst[i].offset);
+		err |= put_user(offset, &pmem32->dst[i].offset);
+		err |= get_user(len, &pmem->dst[i].len);
+		err |= put_user(len, &pmem32->dst[i].len);
+	}
+
+	return err;
+}
+
+static int compat_get_qcedev_vbuf_info(
+		struct compat_qcedev_vbuf_info __user *vbuf32,
+		struct qcedev_vbuf_info __user *vbuf)
+{
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &vbuf32->src[i].vaddr);
+		vbuf->src[i].vaddr = NULL;
+		err |= put_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+		err |= get_user(len, &vbuf32->src[i].len);
+		err |= put_user(len, &vbuf->src[i].len);
+	}
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &vbuf32->dst[i].vaddr);
+		vbuf->dst[i].vaddr = NULL;
+		err |= put_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+		err |= get_user(len, &vbuf32->dst[i].len);
+		err |= put_user(len, &vbuf->dst[i].len);
+	}
+	return err;
+}
+
+static int compat_put_qcedev_vbuf_info(
+		struct compat_qcedev_vbuf_info __user *vbuf32,
+		struct qcedev_vbuf_info __user *vbuf)
+{
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&vbuf->src[i].vaddr);
+		vbuf32->src[i].vaddr = 0;
+		err |= put_user(vaddr, &vbuf32->src[i].vaddr);
+		err |= get_user(len, &vbuf->src[i].len);
+		err |= put_user(len, &vbuf32->src[i].len);
+	}
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&vbuf->dst[i].vaddr);
+		vbuf32->dst[i].vaddr = 0;
+		err |= put_user(vaddr, &vbuf32->dst[i].vaddr);
+		err |= get_user(len, &vbuf->dst[i].len);
+		err |= put_user(len, &vbuf32->dst[i].len);
+	}
+	return err;
+}
+
+static int compat_get_qcedev_cipher_op_req(
+		struct compat_qcedev_cipher_op_req __user *data32,
+		struct qcedev_cipher_op_req __user *data)
+{
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_cipher_alg_enum alg;
+	compat_ulong_t byteoffset;
+	enum qcedev_oper_enum op;
+	compat_ulong_t data_len;
+	compat_ulong_t encklen;
+	compat_ulong_t entries;
+	compat_ulong_t ivlen;
+	uint8_t in_place_op;
+	int err = 0, i = 0;
+	uint8_t use_pmem;
+	uint8_t enckey;
+	uint8_t iv;
+
+	err |= get_user(use_pmem, &data32->use_pmem);
+	err |= put_user(use_pmem, &data->use_pmem);
+
+	if (use_pmem)
+		err |= compat_get_qcedev_pmem_info(&data32->pmem, &data->pmem);
+	else
+		err |= compat_get_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+	err |= get_user(entries, &data32->entries);
+	err |= put_user(entries, &data->entries);
+	err |= get_user(data_len, &data32->data_len);
+	err |= put_user(data_len, &data->data_len);
+	err |= get_user(in_place_op, &data32->in_place_op);
+	err |= put_user(in_place_op, &data->in_place_op);
+
+	for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+		err |= get_user(enckey, &(data32->enckey[i]));
+		err |= put_user(enckey, &(data->enckey[i]));
+	}
+
+	err |= get_user(encklen, &data32->encklen);
+	err |= put_user(encklen, &data->encklen);
+
+	for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+		err |= get_user(iv, &(data32->iv[i]));
+		err |= put_user(iv, &(data->iv[i]));
+	}
+
+	err |= get_user(ivlen, &data32->ivlen);
+	err |= put_user(ivlen, &data->ivlen);
+	err |= get_user(byteoffset, &data32->byteoffset);
+	err |= put_user(byteoffset, &data->byteoffset);
+	err |= get_user(alg, &data32->alg);
+	err |= put_user(alg, &data->alg);
+	err |= get_user(mode, &data32->mode);
+	err |= put_user(mode, &data->mode);
+	err |= get_user(op, &data32->op);
+	err |= put_user(op, &data->op);
+
+	return err;
+}
+
+static int compat_put_qcedev_cipher_op_req(
+		struct compat_qcedev_cipher_op_req __user *data32,
+		struct qcedev_cipher_op_req __user *data)
+{
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_cipher_alg_enum alg;
+	compat_ulong_t byteoffset;
+	enum qcedev_oper_enum op;
+	compat_ulong_t data_len;
+	compat_ulong_t encklen;
+	compat_ulong_t entries;
+	compat_ulong_t ivlen;
+	uint8_t in_place_op;
+	int err = 0, i = 0;
+	uint8_t use_pmem;
+	uint8_t enckey;
+	uint8_t iv;
+
+	err |= get_user(use_pmem, &data->use_pmem);
+	err |= put_user(use_pmem, &data32->use_pmem);
+
+	if (use_pmem)
+		err |= compat_put_qcedev_pmem_info(&data32->pmem, &data->pmem);
+	else
+		err |= compat_put_qcedev_vbuf_info(&data32->vbuf, &data->vbuf);
+
+	err |= get_user(entries, &data->entries);
+	err |= put_user(entries, &data32->entries);
+	err |= get_user(data_len, &data->data_len);
+	err |= put_user(data_len, &data32->data_len);
+	err |= get_user(in_place_op, &data->in_place_op);
+	err |= put_user(in_place_op, &data32->in_place_op);
+
+	for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+		err |= get_user(enckey, &(data->enckey[i]));
+		err |= put_user(enckey, &(data32->enckey[i]));
+	}
+
+	err |= get_user(encklen, &data->encklen);
+	err |= put_user(encklen, &data32->encklen);
+
+	for (i = 0; i < QCEDEV_MAX_IV_SIZE; i++) {
+		err |= get_user(iv, &(data->iv[i]));
+		err |= put_user(iv, &(data32->iv[i]));
+	}
+
+	err |= get_user(ivlen, &data->ivlen);
+	err |= put_user(ivlen, &data32->ivlen);
+	err |= get_user(byteoffset, &data->byteoffset);
+	err |= put_user(byteoffset, &data32->byteoffset);
+	err |= get_user(alg, &data->alg);
+	err |= put_user(alg, &data32->alg);
+	err |= get_user(mode, &data->mode);
+	err |= put_user(mode, &data32->mode);
+	err |= get_user(op, &data->op);
+	err |= put_user(op, &data32->op);
+
+	return err;
+}
+
+static int compat_get_qcedev_sha_op_req(
+		struct compat_qcedev_sha_op_req __user *data32,
+		struct qcedev_sha_op_req __user *data)
+{
+	enum qcedev_sha_alg_enum alg;
+	compat_ulong_t authklen;
+	compat_ulong_t data_len;
+	compat_ulong_t entries;
+	compat_ulong_t diglen;
+	compat_uptr_t authkey;
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint8_t digest;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, &data32->data[i].vaddr);
+		data->data[i].vaddr = 0;
+		err |= put_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+		err |= get_user(len, &data32->data[i].len);
+		err |= put_user(len, &data->data[i].len);
+	}
+
+	err |= get_user(entries, &data32->entries);
+	err |= put_user(entries, &data->entries);
+	err |= get_user(data_len, &data32->data_len);
+	err |= put_user(data_len, &data->data_len);
+
+	for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+		err |= get_user(digest, &(data32->digest[i]));
+		err |= put_user(digest, &(data->digest[i]));
+	}
+
+	err |= get_user(diglen, &data32->diglen);
+	err |= put_user(diglen, &data->diglen);
+	err |= get_user(authkey, &data32->authkey);
+	data->authkey = NULL;
+	err |= put_user(authkey, (compat_uptr_t *)&data->authkey);
+	err |= get_user(authklen, &data32->authklen);
+	err |= put_user(authklen, &data->authklen);
+	err |= get_user(alg, &data32->alg);
+	err |= put_user(alg, &data->alg);
+
+	return err;
+}
+
+static int compat_put_qcedev_sha_op_req(
+		struct compat_qcedev_sha_op_req __user *data32,
+		struct qcedev_sha_op_req __user *data)
+{
+	enum qcedev_sha_alg_enum alg;
+	compat_ulong_t authklen;
+	compat_ulong_t data_len;
+	compat_ulong_t entries;
+	compat_ulong_t diglen;
+	compat_uptr_t authkey;
+	compat_uptr_t vaddr;
+	int err = 0, i = 0;
+	uint8_t digest;
+	uint32_t len;
+
+	for (i = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+		err |= get_user(vaddr, (compat_uptr_t *)&data->data[i].vaddr);
+		data32->data[i].vaddr = 0;
+		err |= put_user(vaddr, &data32->data[i].vaddr);
+		err |= get_user(len, &data->data[i].len);
+		err |= put_user(len, &data32->data[i].len);
+	}
+
+	err |= get_user(entries, &data->entries);
+	err |= put_user(entries, &data32->entries);
+	err |= get_user(data_len, &data->data_len);
+	err |= put_user(data_len, &data32->data_len);
+
+	for (i = 0; i < QCEDEV_MAX_SHA_DIGEST; i++) {
+		err |= get_user(digest, &(data->digest[i]));
+		err |= put_user(digest, &(data32->digest[i]));
+	}
+
+	err |= get_user(diglen, &data->diglen);
+	err |= put_user(diglen, &data32->diglen);
+	err |= get_user(authkey, (compat_uptr_t *)&data->authkey);
+	data32->authkey = 0;
+	err |= put_user(authkey, &data32->authkey);
+	err |= get_user(authklen, &data->authklen);
+	err |= put_user(authklen, &data32->authklen);
+	err |= get_user(alg, &data->alg);
+	err |= put_user(alg, &data32->alg);
+
+	return err;
+}
+
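+/*
+ * The 32-bit and native ioctl numbers differ because _IOWR() encodes
+ * sizeof() of the argument struct, and the compat structs (built from
+ * compat_uptr_t/compat_ulong_t members) are smaller than their native
+ * 64-bit counterparts; each COMPAT_* command therefore maps to a
+ * distinct native command value.
+ */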
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+		return QCEDEV_IOCTL_ENC_REQ;
+	case COMPAT_QCEDEV_IOCTL_DEC_REQ:
+		return QCEDEV_IOCTL_DEC_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+		return QCEDEV_IOCTL_SHA_INIT_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		return QCEDEV_IOCTL_SHA_UPDATE_REQ;
+	case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+		return QCEDEV_IOCTL_SHA_FINAL_REQ;
+	case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ:
+		return QCEDEV_IOCTL_GET_SHA_REQ;
+	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+		return QCEDEV_IOCTL_GET_CMAC_REQ;
+	default:
+		return cmd;
+	}
+
+}
+
+long compat_qcedev_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	switch (cmd) {
+	case COMPAT_QCEDEV_IOCTL_ENC_REQ:
+	case COMPAT_QCEDEV_IOCTL_DEC_REQ: {
+		struct compat_qcedev_cipher_op_req __user *data32;
+		struct qcedev_cipher_op_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_qcedev_cipher_op_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+		err = compat_put_qcedev_cipher_op_req(data32, data);
+		return ret ? ret : err;
+	}
+	case COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ:
+	case COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ:
+	case COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ:
+	case COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ:
+	case COMPAT_QCEDEV_IOCTL_GET_SHA_REQ: {
+		struct compat_qcedev_sha_op_req __user *data32;
+		struct qcedev_sha_op_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_qcedev_sha_op_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qcedev_ioctl(file, convert_cmd(cmd), (unsigned long)data);
+		err = compat_put_qcedev_sha_op_req(data32, data);
+		return ret ? ret : err;
+	}
+	default:
+		return -ENOIOCTLCMD;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(compat_qcedev_ioctl);
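+
+/*
+ * Intended wiring (a sketch; assumes the qcedev char device's fops):
+ *	.unlocked_ioctl	= qcedev_ioctl,
+ *	.compat_ioctl	= compat_qcedev_ioctl,
+ */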
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI 32-64 Compatibility for Crypto driver");
diff --git a/drivers/crypto/msm/compat_qcedev.h b/drivers/crypto/msm/compat_qcedev.h
new file mode 100644
index 0000000..4cc3933
--- /dev/null
+++ b/drivers/crypto/msm/compat_qcedev.h
@@ -0,0 +1,165 @@
+#ifndef _UAPI_COMPAT_QCEDEV__H
+#define _UAPI_COMPAT_QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/**
+ * struct compat_buf_info - Buffer information
+ * @offset:			Offset from the base address of the buffer
+ *				(Used when buffer is allocated using PMEM)
+ * @vaddr:			Virtual buffer address pointer
+ * @len:				Size of the buffer
+ */
+struct	compat_buf_info {
+	union {
+		compat_ulong_t	offset;
+		compat_uptr_t	vaddr;
+	};
+	compat_ulong_t	len;
+};
+
+/**
+ * struct compat_qcedev_vbuf_info - Source and destination Buffer information
+ * @src:				Array of buf_info for input/source
+ * @dst:				Array of buf_info for output/destination
+ */
+struct	compat_qcedev_vbuf_info {
+	struct compat_buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct compat_buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct compat_qcedev_pmem_info - Stores PMEM buffer information
+ * @fd_src:			Handle to /dev/adsp_pmem used to allocate
+ *				memory for input/src buffer
+ * @src:				Array of buf_info for input/source
+ * @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+ *				memory for output/dst buffer
+ * @dst:				Array of buf_info for output/destination
+ * @pmem_src_offset:		The offset from input/src buffer
+ *				(allocated by PMEM)
+ */
+struct	compat_qcedev_pmem_info {
+	compat_int_t		fd_src;
+	struct compat_buf_info	src[QCEDEV_MAX_BUFFERS];
+	compat_int_t		fd_dst;
+	struct compat_buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+ * struct compat_qcedev_cipher_op_req - Holds the ciphering request information
+ * @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+ *			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+ * @pmem (IN):		Stores PMEM buffer information.
+ *			Refer to struct qcedev_pmem_info
+ * @vbuf (IN/OUT):	Stores Source and destination Buffer information
+ *			Refer to struct qcedev_vbuf_info
+ * @data_len (IN):	Total Length of input/src and output/dst in bytes
+ * @in_place_op (IN):	Indicates whether the operation is inplace where
+ *			source == destination
+ *			When using PMEM allocated memory, must set this to 1
+ * @enckey (IN):		128 bits of confidentiality key
+ *			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+ *			enckey[15] bit 7-0
+ * @encklen (IN):	Length of the encryption key (set to 128 bits/16
+ *			bytes in the driver)
+ * @iv (IN/OUT):		Initialization vector data
+ *			This is updated by the driver, incremented by
+ *			number of blocks encrypted/decrypted.
+ * @ivlen (IN):		Length of the IV
+ * @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+ *			for AES-128 CTR mode only)
+ * @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+ * @mode (IN):		Mode use when using AES algorithm: ECB/CBC/CTR
+ *			Applicable when using AES algorithm only
+ * @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+ *			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+ *
+ * If use_pmem is set to 0, the driver assumes that memory was not allocated
+ * via PMEM; the kernel allocates its own buffers, copies the data in from
+ * the user space buffers (data_src/data_dst), processes it, and copies the
+ * result back to the user space buffer.
+ *
+ * If use_pmem is set to 1, the driver assumes that memory was allocated via
+ * PMEM.
+ * The kernel driver will use the fd_src to determine the kernel virtual address
+ * base that maps to the user space virtual address base for the  buffer
+ * allocated in user space.
+ * The final input/src and output/dst buffer pointer will be determined
+ * by adding the offsets to the kernel virtual addr.
+ *
+ * If use of hardware key is supported in the target, user can configure the
+ * key parameters (encklen, enckey) to use the hardware key.
+ * In order to use the hardware key, set encklen to 0 and set the enckey
+ * data array to 0.
+ */
+struct	compat_qcedev_cipher_op_req {
+	uint8_t					use_pmem;
+	union {
+		struct compat_qcedev_pmem_info	pmem;
+		struct compat_qcedev_vbuf_info	vbuf;
+	};
+	compat_ulong_t				entries;
+	compat_ulong_t				data_len;
+	uint8_t					in_place_op;
+	uint8_t					enckey[QCEDEV_MAX_KEY_SIZE];
+	compat_ulong_t				encklen;
+	uint8_t					iv[QCEDEV_MAX_IV_SIZE];
+	compat_ulong_t				ivlen;
+	compat_ulong_t				byteoffset;
+	enum qcedev_cipher_alg_enum		alg;
+	enum qcedev_cipher_mode_enum		mode;
+	enum qcedev_oper_enum			op;
+};
+
+/**
+ * struct compat_qcedev_sha_op_req - Holds the hashing request information
+ * @data (IN):			Array of pointers to the data to be hashed
+ * @entries (IN):		Number of buf_info entries in the data array
+ * @data_len (IN):		Length of data to be hashed
+ * @digest (IN/OUT):		Returns the hashed data information
+ * @diglen (OUT):		Size of the hashed/digest data
+ * @authkey (IN):		Pointer to authentication key for HMAC
+ * @authklen (IN):		Size of the authentication key
+ * @alg (IN):			Secure Hash algorithm
+ */
+struct	compat_qcedev_sha_op_req {
+	struct compat_buf_info			data[QCEDEV_MAX_BUFFERS];
+	compat_ulong_t				entries;
+	compat_ulong_t				data_len;
+	uint8_t					digest[QCEDEV_MAX_SHA_DIGEST];
+	compat_ulong_t				diglen;
+	compat_uptr_t				authkey;
+	compat_ulong_t				authklen;
+	enum qcedev_sha_alg_enum		alg;
+};
+
+struct file;
+extern long compat_qcedev_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg);
+
+#define COMPAT_QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct compat_qcedev_cipher_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct compat_qcedev_sha_op_req)
+#define COMPAT_QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define COMPAT_QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define COMPAT_QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct compat_qcedev_sha_op_req)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _UAPI_COMPAT_QCEDEV__H */
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
new file mode 100644
index 0000000..ba6825e
--- /dev/null
+++ b/drivers/crypto/msm/ice.c
@@ -0,0 +1,1761 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/device-mapper.h>
+#include <linux/clk.h>
+#include <linux/cdev.h>
+#include <linux/regulator/consumer.h>
+#include <linux/msm-bus.h>
+#include <linux/pfk.h>
+#include <crypto/ice.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+#include "iceregs.h"
+
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
+#define TZ_OWNER_QSEE_OS                 50
+#define TZ_SVC_KEYSTORE                  5     /* Keystore management */
+
+#define TZ_OS_KS_RESTORE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
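+
+/*
+ * Worked example: with owner 50 (0x32), service 5 and function 0x06,
+ * TZ_SYSCALL_CREATE_SMC_ID() evaluates to
+ * (0x32 << 24) | (0x05 << 8) | 0x06 = 0x32000506.
+ */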
+
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+
+#define TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x06)
+
+#define TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+
+#define ICE_REV(x, y) (((x) & ICE_CORE_##y##_REV_MASK) >> ICE_CORE_##y##_REV)
+#define QCOM_UFS_ICE_DEV	"iceufs"
+#define QCOM_SDCC_ICE_DEV	"icesdcc"
+#define QCOM_ICE_TYPE_NAME_LEN 8
+#define QCOM_ICE_MAX_BIST_CHECK_COUNT 100
+#define QCOM_ICE_UFS		10
+#define QCOM_ICE_SDCC		20
+
+struct ice_clk_info {
+	struct list_head list;
+	struct clk *clk;
+	const char *name;
+	u32 max_freq;
+	u32 min_freq;
+	u32 curr_freq;
+	bool enabled;
+};
+
+struct qcom_ice_bus_vote {
+	uint32_t client_handle;
+	uint32_t curr_vote;
+	int min_bw_vote;
+	int max_bw_vote;
+	int saved_vote;
+	bool is_max_bw_needed;
+	struct device_attribute max_bus_bw;
+};
+
+static LIST_HEAD(ice_devices);
+/*
+ * ICE HW device structure.
+ */
+struct ice_device {
+	struct list_head	list;
+	struct device		*pdev;
+	struct cdev		cdev;
+	dev_t			device_no;
+	struct class		*driver_class;
+	void __iomem		*mmio;
+	struct resource		*res;
+	int			irq;
+	bool			is_ice_enabled;
+	bool			is_ice_disable_fuse_blown;
+	ice_error_cb		error_cb;
+	void			*host_controller_data; /* UFS/EMMC/other? */
+	struct list_head	clk_list_head;
+	u32			ice_hw_version;
+	bool			is_ice_clk_available;
+	char			ice_instance_type[QCOM_ICE_TYPE_NAME_LEN];
+	struct regulator	*reg;
+	bool			is_regulator_available;
+	struct qcom_ice_bus_vote bus_vote;
+	ktime_t			ice_reset_start_time;
+	ktime_t			ice_reset_complete_time;
+};
+
+static int qti_ice_setting_config(struct request *req,
+		struct platform_device *pdev,
+		struct ice_crypto_setting *crypto_data,
+		struct ice_data_setting *setting)
+{
+	struct ice_device *ice_dev = NULL;
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev) {
+		pr_debug("%s no ICE device\n", __func__);
+
+		/* make the caller finish peacefully */
+		return 0;
+	}
+
+	if (ice_dev->is_ice_disable_fuse_blown) {
+		pr_err("%s ICE disabled fuse is blown\n", __func__);
+		return -EPERM;
+	}
+
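+	/*
+	 * key_index is unsigned, hence the cast: a negative index means
+	 * no key is configured, so the crypto data is not copied and any
+	 * bypass defaults set by the caller are left in place.
+	 */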
+	if ((short)(crypto_data->key_index) >= 0) {
+
+		memcpy(&setting->crypto_data, crypto_data,
+				sizeof(setting->crypto_data));
+
+		if (rq_data_dir(req) == WRITE)
+			setting->encr_bypass = false;
+		else if (rq_data_dir(req) == READ)
+			setting->decr_bypass = false;
+		else {
+			/* neither READ nor WRITE: bypass both directions */
+			setting->encr_bypass = true;
+			setting->decr_bypass = true;
+		}
+	}
+
+	return 0;
+}
+
+static int qcom_ice_enable_clocks(struct ice_device *, bool);
+
+#ifdef CONFIG_MSM_BUS_SCALING
+
+static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
+{
+	int err = 0;
+
+	if (vote != ice_dev->bus_vote.curr_vote) {
+		err = msm_bus_scale_client_update_request(
+				ice_dev->bus_vote.client_handle, vote);
+		if (err) {
+			dev_err(ice_dev->pdev,
+				"%s:failed:client_handle=0x%x, vote=%d, err=%d\n",
+				__func__, ice_dev->bus_vote.client_handle,
+				vote, err);
+			goto out;
+		}
+		ice_dev->bus_vote.curr_vote = vote;
+	}
+out:
+	return err;
+}
+
+static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
+		const char *speed_mode)
+{
+	struct device *dev = ice_dev->pdev;
+	struct device_node *np = dev->of_node;
+	int err;
+	const char *key = "qcom,bus-vector-names";
+
+	if (!speed_mode) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (ice_dev->bus_vote.is_max_bw_needed && strcmp(speed_mode, "MIN"))
+		err = of_property_match_string(np, key, "MAX");
+	else
+		err = of_property_match_string(np, key, speed_mode);
+out:
+	if (err < 0)
+		dev_err(dev, "%s: Invalid %s mode %d\n",
+				__func__, speed_mode, err);
+	return err;
+}
+
+static int qcom_ice_bus_register(struct ice_device *ice_dev)
+{
+	int err = 0;
+	struct msm_bus_scale_pdata *bus_pdata;
+	struct device *dev = ice_dev->pdev;
+	struct platform_device *pdev = to_platform_device(dev);
+	struct device_node *np = dev->of_node;
+
+	bus_pdata = msm_bus_cl_get_pdata(pdev);
+	if (!bus_pdata) {
+		dev_err(dev, "%s: failed to get bus vectors\n", __func__);
+		err = -ENODATA;
+		goto out;
+	}
+
+	err = of_property_count_strings(np, "qcom,bus-vector-names");
+	if (err < 0 || err != bus_pdata->num_usecases) {
+		dev_err(dev, "%s: Error = %d with qcom,bus-vector-names\n",
+				__func__, err);
+		goto out;
+	}
+	err = 0;
+
+	ice_dev->bus_vote.client_handle =
+			msm_bus_scale_register_client(bus_pdata);
+	if (!ice_dev->bus_vote.client_handle) {
+		dev_err(dev, "%s: msm_bus_scale_register_client failed\n",
+				__func__);
+		err = -EFAULT;
+		goto out;
+	}
+
+	/* cache the vote index for minimum and maximum bandwidth */
+	ice_dev->bus_vote.min_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+	ice_dev->bus_vote.max_bw_vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+out:
+	return err;
+}
+
+#else
+
+static int qcom_ice_set_bus_vote(struct ice_device *ice_dev, int vote)
+{
+	return 0;
+}
+
+static int qcom_ice_get_bus_vote(struct ice_device *ice_dev,
+		const char *speed_mode)
+{
+	return 0;
+}
+
+static int qcom_ice_bus_register(struct ice_device *ice_dev)
+{
+	return 0;
+}
+#endif /* CONFIG_MSM_BUS_SCALING */
+
+static int qcom_ice_get_vreg(struct ice_device *ice_dev)
+{
+	int ret = 0;
+
+	if (!ice_dev->is_regulator_available)
+		return 0;
+
+	if (ice_dev->reg)
+		return 0;
+
+	ice_dev->reg = devm_regulator_get(ice_dev->pdev, "vdd-hba");
+	if (IS_ERR(ice_dev->reg)) {
+		ret = PTR_ERR(ice_dev->reg);
+		dev_err(ice_dev->pdev, "%s: %s get failed, err=%d\n",
+			__func__, "vdd-hba-supply", ret);
+	}
+	return ret;
+}
+
+static void qcom_ice_config_proc_ignore(struct ice_device *ice_dev)
+{
+	u32 regval;
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2 &&
+	    ICE_REV(ice_dev->ice_hw_version, MINOR) == 0 &&
+	    ICE_REV(ice_dev->ice_hw_version, STEP) == 0) {
+		regval = qcom_ice_readl(ice_dev,
+				QCOM_ICE_REGS_ADVANCED_CONTROL);
+		regval |= 0x800;
+		qcom_ice_writel(ice_dev, regval,
+				QCOM_ICE_REGS_ADVANCED_CONTROL);
+		/* Ensure register is updated */
+		mb();
+	}
+}
+
+static void qcom_ice_low_power_mode_enable(struct ice_device *ice_dev)
+{
+	u32 regval;
+
+	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	/*
+	 * Enable low power mode sequence
+	 * [0]-0, [1]-0, [2]-0, [3]-E, [4]-0, [5]-0, [6]-0, [7]-0
+	 */
+	regval |= 0x7000;
+	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	/*
+	 * Ensure previous instructions were completed before issuing the next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static void qcom_ice_enable_test_bus_config(struct ice_device *ice_dev)
+{
+	/*
+	 * Configure & enable ICE_TEST_BUS_REG to reflect ICE intr lines
+	 * MAIN_TEST_BUS_SELECTOR = 0 (ICE_CONFIG)
+	 * TEST_BUS_REG_EN = 1 (ENABLE)
+	 */
+	u32 regval;
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		return;
+
+	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+	regval &= 0x0FFFFFFF;
+	/* TBD: replace 0x2 with define in iceregs.h */
+	regval |= 0x2;
+	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+
+	/*
+	 * Ensure previous instructions were completed before issuing the next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static void qcom_ice_optimization_enable(struct ice_device *ice_dev)
+{
+	u32 regval;
+
+	regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		regval |= 0xD807100;
+	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+		regval |= 0x3F007100;
+
+	/* ICE Optimizations Enable Sequence */
+	udelay(5);
+	/* [0]-0, [1]-0, [2]-8, [3]-E, [4]-0, [5]-0, [6]-F, [7]-A */
+	qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ADVANCED_CONTROL);
+	/*
+	 * Ensure previous instructions were completed before issuing the next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	/* ICE HPG requires sleep before writing */
+	udelay(5);
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+		regval = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP);
+		regval |= 0xF;
+		qcom_ice_writel(ice_dev, regval, QCOM_ICE_REGS_ENDIAN_SWAP);
+		/*
+		 * Ensure previous instructions were completed before issuing the
+		 * next ICE commands
+		 */
+		mb();
+	}
+}
+
+static int qcom_ice_wait_bist_status(struct ice_device *ice_dev)
+{
+	int count;
+	u32 reg;
+
+	/* Poll until all BIST bits are reset */
+	for (count = 0; count < QCOM_ICE_MAX_BIST_CHECK_COUNT; count++) {
+		reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS);
+		if (!(reg & ICE_BIST_STATUS_MASK))
+			break;
+		udelay(50);
+	}
+
+	if (reg & ICE_BIST_STATUS_MASK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int qcom_ice_enable(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+	int ret = 0;
+
+	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1)))
+		ret = qcom_ice_wait_bist_status(ice_dev);
+	if (ret) {
+		dev_err(ice_dev->pdev, "BIST status error (%d)\n", ret);
+		return ret;
+	}
+
+	/* Starting ICE v3 enabling is done at storage controller (UFS/SDCC) */
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 3)
+		return 0;
+
+	/*
+	 * To enable ICE, perform following
+	 * 1. Set IGNORE_CONTROLLER_RESET to USE in ICE_RESET register
+	 * 2. Disable GLOBAL_BYPASS bit in ICE_CONTROL register
+	 */
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET);
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		reg &= 0x0;
+	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+		reg &= ~0x100;
+
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_RESET);
+
+	/*
+	 * Ensure previous instructions were completed before issuing the next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL);
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) >= 2)
+		reg &= 0xFFFE;
+	else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1)
+		reg &= ~0x7;
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_CONTROL);
+
+	/*
+	 * Ensure previous instructions were completed before issuing the next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+
+	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+		reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS);
+		if ((reg & 0x80000000) != 0x0) {
+			pr_err("%s: Bypass failed for ice = %p\n",
+				__func__, (void *)ice_dev);
+			WARN_ON(1);
+		}
+	}
+	return 0;
+}
+
+static int qcom_ice_verify_ice(struct ice_device *ice_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+
+	rev = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION);
+	maj_rev = (rev & ICE_CORE_MAJOR_REV_MASK) >> ICE_CORE_MAJOR_REV;
+	min_rev = (rev & ICE_CORE_MINOR_REV_MASK) >> ICE_CORE_MINOR_REV;
+	step_rev = (rev & ICE_CORE_STEP_REV_MASK) >> ICE_CORE_STEP_REV;
+
+	if (maj_rev > ICE_CORE_CURRENT_MAJOR_VERSION) {
+		pr_err("%s: Unknown QC ICE device at %lu, rev %d.%d.%d\n",
+			__func__, (unsigned long)ice_dev->mmio,
+			maj_rev, min_rev, step_rev);
+		return -ENODEV;
+	}
+	ice_dev->ice_hw_version = rev;
+
+	dev_info(ice_dev->pdev, "QC ICE %d.%d.%d device found @0x%p\n",
+					maj_rev, min_rev, step_rev,
+					ice_dev->mmio);
+
+	return 0;
+}
+
+static void qcom_ice_enable_intr(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	reg &= ~QCOM_ICE_NON_SEC_IRQ_MASK;
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	/*
+	 * Ensure previous instructions were completed before issuing the next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static void qcom_ice_disable_intr(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	reg |= QCOM_ICE_NON_SEC_IRQ_MASK;
+	qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_NON_SEC_IRQ_MASK);
+	/*
+	 * Ensure previous instructions were completed before issuing the next
+	 * ICE initialization/optimization instruction
+	 */
+	mb();
+}
+
+static irqreturn_t qcom_ice_isr(int isr, void *data)
+{
+	irqreturn_t retval = IRQ_NONE;
+	u32 status;
+	struct ice_device *ice_dev = data;
+
+	status = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS);
+	if (status) {
+		ice_dev->error_cb(ice_dev->host_controller_data, status);
+
+		/* Interrupt has been handled. Clear the IRQ */
+		qcom_ice_writel(ice_dev, status, QCOM_ICE_REGS_NON_SEC_IRQ_CLR);
+		/* Ensure instruction is completed */
+		mb();
+		retval = IRQ_HANDLED;
+	}
+	return retval;
+}
+
+static void qcom_ice_parse_ice_instance_type(struct platform_device *pdev,
+		struct ice_device *ice_dev)
+{
+	int ret = -1;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	const char *type;
+
+	ret = of_property_read_string_index(np, "qcom,instance-type", 0, &type);
+	if (ret) {
+		pr_err("%s: Could not get ICE instance type\n", __func__);
+		goto out;
+	}
+	strlcpy(ice_dev->ice_instance_type, type, QCOM_ICE_TYPE_NAME_LEN);
+out:
+	return;
+}
+
+static int qcom_ice_parse_clock_info(struct platform_device *pdev,
+		struct ice_device *ice_dev)
+{
+	int ret = -1, cnt, i, len;
+	struct device *dev = &pdev->dev;
+	struct device_node *np = dev->of_node;
+	char *name;
+	struct ice_clk_info *clki;
+	u32 *clkfreq = NULL;
+
+	if (!np)
+		goto out;
+
+	cnt = of_property_count_strings(np, "clock-names");
+	if (cnt <= 0) {
+		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+				__func__);
+		ret = cnt;
+		goto out;
+	}
+
+	if (!of_get_property(np, "qcom,op-freq-hz", &len)) {
+		dev_info(dev, "qcom,op-freq-hz property not specified\n");
+		goto out;
+	}
+
+	len = len/sizeof(*clkfreq);
+	if (len != cnt)
+		goto out;
+
+	clkfreq = devm_kzalloc(dev, len * sizeof(*clkfreq), GFP_KERNEL);
+	if (!clkfreq) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ret = of_property_read_u32_array(np, "qcom,op-freq-hz", clkfreq, len);
+
+	INIT_LIST_HEAD(&ice_dev->clk_list_head);
+
+	for (i = 0; i < cnt; i++) {
+		ret = of_property_read_string_index(np,
+				"clock-names", i, (const char **)&name);
+		if (ret)
+			goto out;
+
+		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+		if (!clki) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		clki->max_freq = clkfreq[i];
+		clki->name = kstrdup(name, GFP_KERNEL);
+		list_add_tail(&clki->list, &ice_dev->clk_list_head);
+	}
+out:
+	if (clkfreq)
+		devm_kfree(dev, (void *)clkfreq);
+	return ret;
+}
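+
+/*
+ * Illustrative DT fragment consumed above (the name and rate are
+ * examples, not mandated values):
+ *
+ *	clock-names = "ice_core_clk";
+ *	qcom,op-freq-hz = <300000000>;
+ */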
+
+static int qcom_ice_get_device_tree_data(struct platform_device *pdev,
+		struct ice_device *ice_dev)
+{
+	struct device *dev = &pdev->dev;
+	int rc = -1;
+	int irq;
+
+	ice_dev->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!ice_dev->res) {
+		pr_err("%s: No memory available for IORESOURCE\n", __func__);
+		return -ENOMEM;
+	}
+
+	ice_dev->mmio = devm_ioremap_resource(dev, ice_dev->res);
+	if (IS_ERR(ice_dev->mmio)) {
+		rc = PTR_ERR(ice_dev->mmio);
+		pr_err("%s: Error = %d mapping ICE io memory\n", __func__, rc);
+		goto out;
+	}
+
+	if (!of_parse_phandle(pdev->dev.of_node, "vdd-hba-supply", 0)) {
+		pr_err("%s: No vdd-hba-supply regulator, assuming not needed\n",
+								 __func__);
+		ice_dev->is_regulator_available = false;
+	} else {
+		ice_dev->is_regulator_available = true;
+	}
+	ice_dev->is_ice_clk_available = of_property_read_bool(
+						(&pdev->dev)->of_node,
+						"qcom,enable-ice-clk");
+
+	if (ice_dev->is_ice_clk_available) {
+		rc = qcom_ice_parse_clock_info(pdev, ice_dev);
+		if (rc) {
+			pr_err("%s: qcom_ice_parse_clock_info failed (%d)\n",
+				__func__, rc);
+			goto err_dev;
+		}
+	}
+
+	/* ICE interrupts are only relevant for v2.x */
+	irq = platform_get_irq(pdev, 0);
+	if (irq >= 0) {
+		rc = devm_request_irq(dev, irq, qcom_ice_isr, 0, dev_name(dev),
+				ice_dev);
+		if (rc) {
+			pr_err("%s: devm_request_irq irq=%d failed (%d)\n",
+				__func__, irq, rc);
+			goto err_dev;
+		}
+		ice_dev->irq = irq;
+		pr_info("ICE IRQ = %d\n", ice_dev->irq);
+	} else {
+		dev_dbg(dev, "IRQ resource not available\n");
+	}
+
+	qcom_ice_parse_ice_instance_type(pdev, ice_dev);
+
+	return 0;
+err_dev:
+	if (rc && ice_dev->mmio)
+		devm_iounmap(dev, ice_dev->mmio);
+out:
+	return rc;
+}
+
+/*
+ * An ICE HW instance can exist in UFS or eMMC based storage HW.
+ * Userspace does not know what kind of ICE it is dealing with; it can
+ * find out which storage device it is booting from, but not all storage
+ * types have supported ICE from the beginning. So an ICE device node is
+ * created for userspace to probe whether ICE exists for that kind of
+ * storage.
+ */
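+/*
+ * e.g. userspace can simply open("/dev/iceufs") or open("/dev/icesdcc")
+ * to check for existence; no further fops are provided.
+ */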
+static const struct file_operations qcom_ice_fops = {
+	.owner = THIS_MODULE,
+};
+
+static int register_ice_device(struct ice_device *ice_dev)
+{
+	int rc = 0;
+	unsigned int baseminor = 0;
+	unsigned int count = 1;
+	struct device *class_dev;
+	int is_sdcc_ice = !strcmp(ice_dev->ice_instance_type, "sdcc");
+
+	rc = alloc_chrdev_region(&ice_dev->device_no, baseminor, count,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		return rc;
+	}
+	ice_dev->driver_class = class_create(THIS_MODULE,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+	if (IS_ERR(ice_dev->driver_class)) {
+		rc = PTR_ERR(ice_dev->driver_class);
+		pr_err("class_create failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_unreg_chrdev_region;
+	}
+	class_dev = device_create(ice_dev->driver_class, NULL,
+					ice_dev->device_no, NULL,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+
+	if (IS_ERR(class_dev)) {
+		pr_err("class_device_create failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		rc = -ENOMEM;
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&ice_dev->cdev, &qcom_ice_fops);
+	ice_dev->cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&ice_dev->cdev, MKDEV(MAJOR(ice_dev->device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d for %s\n", rc,
+			is_sdcc_ice ? QCOM_SDCC_ICE_DEV : QCOM_UFS_ICE_DEV);
+		goto exit_destroy_device;
+	}
+	return  0;
+
+exit_destroy_device:
+	device_destroy(ice_dev->driver_class, ice_dev->device_no);
+
+exit_destroy_class:
+	class_destroy(ice_dev->driver_class);
+
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(ice_dev->device_no, 1);
+	return rc;
+}
+
+static int qcom_ice_probe(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+	int rc = 0;
+
+	if (!pdev) {
+		pr_err("%s: Invalid platform_device passed\n",
+			__func__);
+		return -EINVAL;
+	}
+
+	ice_dev = kzalloc(sizeof(struct ice_device), GFP_KERNEL);
+
+	if (!ice_dev) {
+		rc = -ENOMEM;
+		pr_err("%s: Error %d allocating memory for ICE device\n",
+			__func__, rc);
+		goto out;
+	}
+
+	ice_dev->pdev = &pdev->dev;
+	if (!ice_dev->pdev) {
+		rc = -EINVAL;
+		pr_err("%s: Invalid device passed in platform_device\n",
+								__func__);
+		goto err_ice_dev;
+	}
+
+	if (pdev->dev.of_node)
+		rc = qcom_ice_get_device_tree_data(pdev, ice_dev);
+	else {
+		rc = -EINVAL;
+		pr_err("%s: ICE device node not found\n", __func__);
+	}
+
+	if (rc)
+		goto err_ice_dev;
+
+	pr_debug("%s: Registering ICE device\n", __func__);
+	rc = register_ice_device(ice_dev);
+	if (rc) {
+		pr_err("create character device failed.\n");
+		goto err_ice_dev;
+	}
+
+	/*
+	 * Enabling ICE here would be a waste of power. ICE is enabled
+	 * when the first request for a crypto operation arrives.
+	 */
+	ice_dev->is_ice_enabled = false;
+
+	platform_set_drvdata(pdev, ice_dev);
+	list_add_tail(&ice_dev->list, &ice_devices);
+
+	goto out;
+
+err_ice_dev:
+	kfree(ice_dev);
+out:
+	return rc;
+}
+
+static int qcom_ice_remove(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	ice_dev = (struct ice_device *)platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return 0;
+
+	qcom_ice_disable_intr(ice_dev);
+
+	device_init_wakeup(&pdev->dev, false);
+	if (ice_dev->mmio)
+		iounmap(ice_dev->mmio);
+
+	list_del_init(&ice_dev->list);
+	kfree(ice_dev);
+
+	return 0;
+}
+
+static int  qcom_ice_suspend(struct platform_device *pdev)
+{
+	return 0;
+}
+
+static int qcom_ice_restore_config(void)
+{
+	struct scm_desc desc = {0};
+	int ret;
+
+	/*
+	 * TZ checks the KEYS_RAM_RESET_COMPLETED status bit before processing
+	 * the restore config command. This avoids two calls from HLOS to TZ:
+	 * one to check the KEYS_RAM_RESET_COMPLETED status bit and a second
+	 * to restore the config.
+	 */
+
+	desc.arginfo = TZ_OS_KS_RESTORE_KEY_ID_PARAM_ID;
+
+	ret = scm_call2(TZ_OS_KS_RESTORE_KEY_ID, &desc);
+
+	if (ret)
+		pr_err("%s: Error: 0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
+{
+	struct scm_desc desc = {0};
+	int ret = -1;
+
+	/* For ice 3, key configuration needs to be restored in case of reset */
+
+	desc.arginfo = TZ_OS_KS_RESTORE_KEY_CONFIG_ID_PARAM_ID;
+
+	if (!strcmp(ice_dev->ice_instance_type, "sdcc"))
+		desc.args[0] = QCOM_ICE_SDCC;
+
+	if (!strcmp(ice_dev->ice_instance_type, "ufs"))
+		desc.args[0] = QCOM_ICE_UFS;
+
+	ret = scm_call2(TZ_OS_KS_RESTORE_KEY_CONFIG_ID, &desc);
+
+	if (ret)
+		pr_err("%s: Error:  0x%x\n", __func__, ret);
+
+	return ret;
+}
+
+static int qcom_ice_init_clocks(struct ice_device *ice)
+{
+	int ret = -EINVAL;
+	struct ice_clk_info *clki;
+	struct device *dev = ice->pdev;
+	struct list_head *head = &ice->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		clki->clk = devm_clk_get(dev, clki->name);
+		if (IS_ERR(clki->clk)) {
+			ret = PTR_ERR(clki->clk);
+			dev_err(dev, "%s: %s clk get failed, %d\n",
+					__func__, clki->name, ret);
+			goto out;
+		}
+
+		/* Not all clocks would have a rate to be set */
+		ret = 0;
+		if (clki->max_freq) {
+			ret = clk_set_rate(clki->clk, clki->max_freq);
+			if (ret) {
+				dev_err(dev,
+				"%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+				clki->max_freq, ret);
+				goto out;
+			}
+			clki->curr_freq = clki->max_freq;
+			dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+		}
+	}
+out:
+	return ret;
+}
+
+static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
+{
+	int ret = 0;
+	struct ice_clk_info *clki;
+	struct device *dev = ice->pdev;
+	struct list_head *head = &ice->clk_list_head;
+
+	if (!head || list_empty(head)) {
+		dev_err(dev, "%s:ICE Clock list null/empty\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (!ice->is_ice_clk_available) {
+		dev_err(dev, "%s:ICE Clock not available\n", __func__);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		if (enable)
+			ret = clk_prepare_enable(clki->clk);
+		else
+			clk_disable_unprepare(clki->clk);
+
+		/* clk_disable_unprepare() returns void, so only the enable
+		 * path can fail here.
+		 */
+		if (ret) {
+			dev_err(dev, "Unable to %s ICE core clk\n",
+				enable ? "enable" : "disable");
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+static int qcom_ice_secure_ice_init(struct ice_device *ice_dev)
+{
+	/* We need to enable source for ICE secure interrupts */
+	int ret = 0;
+	u32 regval;
+
+	regval = scm_io_read((unsigned long)ice_dev->res +
+			QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK);
+
+	regval &= ~QCOM_ICE_SEC_IRQ_MASK;
+	ret = scm_io_write((unsigned long)ice_dev->res +
+			QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK, regval);
+
+	/*
+	 * Ensure the previous instructions were completed before issuing the
+	 * next ICE initialization/optimization instruction
+	 */
+	mb();
+
+	/* scm_io_write() returns 0 on success, so non-zero ret is an error */
+	if (ret)
+		pr_err("%s: failed(0x%x) to init secure ICE config\n",
+								__func__, ret);
+	return ret;
+}
+
+static int qcom_ice_update_sec_cfg(struct ice_device *ice_dev)
+{
+	int ret = 0, scm_ret = 0;
+
+	/* scm command buffer structure */
+	struct qcom_scm_cmd_buf {
+		unsigned int device_id;
+		unsigned int spare;
+	} cbuf = {0};
+
+	/*
+	 * Ideally, we should check the ICE version to decide whether to
+	 * proceed. Since the version is not available when this function is
+	 * called, we depend on is_ice_clk_available instead.
+	 */
+	if (ice_dev->is_ice_clk_available)
+		goto out;
+
+	/*
+	 * Pass the device ID to TZ so that the eMMC/UFS cases can be
+	 * handled properly
+	 */
+	#define RESTORE_SEC_CFG_CMD	0x2
+	#define ICE_TZ_DEV_ID	20
+
+	cbuf.device_id = ICE_TZ_DEV_ID;
+	ret = scm_restore_sec_cfg(cbuf.device_id, cbuf.spare, &scm_ret);
+	if (ret || scm_ret) {
+		pr_err("%s: failed, ret %d scm_ret %d\n",
+						__func__, ret, scm_ret);
+		if (!ret)
+			ret = scm_ret;
+	}
+out:
+
+	return ret;
+}
+
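+/*
+ * Bring the ICE HW to an operational state. The sequence below is:
+ * clocks and bus bandwidth (when an ICE clock is available), secure
+ * configuration restore, HW sanity check, fuse check, TZ-side secure init
+ * (ICE v1 only; v2+ is handled by the TZ driver), and finally the low
+ * power/optimization/enable/interrupt programming.
+ */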
+static int qcom_ice_finish_init(struct ice_device *ice_dev)
+{
+	unsigned int reg;
+	int err = 0;
+
+	if (!ice_dev) {
+		pr_err("%s: Null data received\n", __func__);
+		err = -ENODEV;
+		goto out;
+	}
+
+	if (ice_dev->is_ice_clk_available) {
+		err = qcom_ice_init_clocks(ice_dev);
+		if (err)
+			goto out;
+
+		err = qcom_ice_bus_register(ice_dev);
+		if (err)
+			goto out;
+	}
+
+	/*
+	 * It is possible that the ICE device is not yet probed when the host
+	 * is probed, causing the host probe to be deferred. A deferred host
+	 * probe can cause a power collapse of the host, which can wipe the
+	 * configuration of both host and ICE. It is prudent to restore the
+	 * config here.
+	 */
+	err = qcom_ice_update_sec_cfg(ice_dev);
+	if (err)
+		goto out;
+
+	err = qcom_ice_verify_ice(ice_dev);
+	if (err)
+		goto out;
+
+	/*
+	 * If ICE_DISABLE_FUSE is blown, return immediately. Currently the
+	 * FORCE HW keys are also treated as disabled, since there is no use
+	 * case for them in either FDE or PFE.
+	 */
+	reg = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING);
+	reg &= (ICE_FUSE_SETTING_MASK |
+		ICE_FORCE_HW_KEY0_SETTING_MASK |
+		ICE_FORCE_HW_KEY1_SETTING_MASK);
+
+	if (reg) {
+		ice_dev->is_ice_disable_fuse_blown = true;
+		pr_err("%s: Error: ICE_ERROR_HW_DISABLE_FUSE_BLOWN\n",
+								__func__);
+		err = -EPERM;
+		goto out;
+	}
+
+	/* TZ side of ICE driver would handle secure init of ICE HW from v2 */
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1 &&
+		qcom_ice_secure_ice_init(ice_dev)) {
+		pr_err("%s: Error: ICE_ERROR_ICE_TZ_INIT_FAILED\n", __func__);
+		err = -EFAULT;
+		goto out;
+	}
+
+	qcom_ice_low_power_mode_enable(ice_dev);
+	qcom_ice_optimization_enable(ice_dev);
+	qcom_ice_config_proc_ignore(ice_dev);
+	qcom_ice_enable_test_bus_config(ice_dev);
+	qcom_ice_enable(ice_dev);
+	ice_dev->is_ice_enabled = true;
+	qcom_ice_enable_intr(ice_dev);
+
+out:
+	return err;
+}
+
+static int qcom_ice_init(struct platform_device *pdev,
+			void *host_controller_data,
+			ice_error_cb error_cb)
+{
+	/*
+	 * A completion event for the host controller is triggered upon
+	 * initialization completion. Initialization puts ICE into global
+	 * bypass mode; ICE is then enabled per request when a data transfer
+	 * request is received.
+	 */
+	struct ice_device *ice_dev;
+
+	ice_dev = platform_get_drvdata(pdev);
+	if (!ice_dev) {
+		pr_err("%s: invalid device\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev->error_cb = error_cb;
+	ice_dev->host_controller_data = host_controller_data;
+
+	return qcom_ice_finish_init(ice_dev);
+}
+
+static int qcom_ice_finish_power_collapse(struct ice_device *ice_dev)
+{
+	int err = 0;
+
+	if (ice_dev->is_ice_disable_fuse_blown) {
+		err = -EPERM;
+		goto out;
+	}
+
+	if (ice_dev->is_ice_enabled) {
+		/*
+		 * ICE resets into global bypass mode with optimization and
+		 * low power mode disabled. Hence we need to redo those
+		 * sequences.
+		 */
+		qcom_ice_low_power_mode_enable(ice_dev);
+
+		qcom_ice_enable_test_bus_config(ice_dev);
+
+		qcom_ice_optimization_enable(ice_dev);
+		qcom_ice_enable(ice_dev);
+
+		if (ICE_REV(ice_dev->ice_hw_version, MAJOR) == 1) {
+			/*
+			 * When ICE resets, it wipes all keys from the LUT;
+			 * the ICE driver must call TZ to restore them.
+			 */
+			if (qcom_ice_restore_config()) {
+				err = -EFAULT;
+				goto out;
+			}
+
+		/*
+		 * ICE loses its key configuration when UFS is reset;
+		 * restore it.
+		 */
+		} else if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
+			err = qcom_ice_restore_key_config(ice_dev);
+			if (err)
+				goto out;
+
+			/*
+			 * For the PFE case, clear the cached ICE key table;
+			 * this forces keys to be reconfigured on each
+			 * subsequent transaction.
+			 */
+			pfk_clear_on_reset();
+		}
+	}
+
+	ice_dev->ice_reset_complete_time = ktime_get();
+out:
+	return err;
+}
+
+static int qcom_ice_resume(struct platform_device *pdev)
+{
+	/*
+	 * ICE is power collapsed when the storage controller is power
+	 * collapsed. The ICE resume function is responsible for the ICE HW
+	 * enabling sequence and for key restoration. A completion event
+	 * should be triggered upon resume completion; the storage driver is
+	 * fully operational only after receiving this event.
+	 */
+	struct ice_device *ice_dev;
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return -EINVAL;
+
+	if (ice_dev->is_ice_clk_available) {
+		/*
+		 * Storage is calling this function after power collapse which
+		 * would put ICE into GLOBAL_BYPASS mode. Make sure to enable
+		 * ICE
+		 */
+		qcom_ice_enable(ice_dev);
+	}
+
+	return 0;
+}
+
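+/*
+ * Dump the ICE test bus. Judging from the register writes below, bit 0 of
+ * TEST_BUS_CONTROL enables the test bus, bits 31:28 select the bus, and
+ * bits 19:16 select the stream within the 0xD (stream datapath) bus; the
+ * 0xD selector is skipped in the first loop and handled separately.
+ */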
+static void qcom_ice_dump_test_bus(struct ice_device *ice_dev)
+{
+	u32 reg = 0x1;
+	u32 val;
+	u8 bus_selector;
+	u8 stream_selector;
+
+	pr_err("ICE TEST BUS DUMP:\n");
+
+	for (bus_selector = 0; bus_selector <= 0xF;  bus_selector++) {
+		reg = 0x1;	/* enable test bus */
+		reg |= bus_selector << 28;
+		if (bus_selector == 0xD)
+			continue;
+		qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		mb();
+		val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			reg, val);
+	}
+
+	pr_err("ICE TEST BUS DUMP (ICE_STREAM1_DATAPATH_TEST_BUS):\n");
+	for (stream_selector = 0; stream_selector <= 0xF; stream_selector++) {
+		reg = 0xD0000001;	/* enable stream test bus */
+		reg |= stream_selector << 16;
+		qcom_ice_writel(ice_dev, reg, QCOM_ICE_REGS_TEST_BUS_CONTROL);
+		/*
+		 * make sure test bus selector is written before reading
+		 * the test bus register
+		 */
+		mb();
+		val = qcom_ice_readl(ice_dev, QCOM_ICE_REGS_TEST_BUS_REG);
+		pr_err("ICE_TEST_BUS_CONTROL: 0x%08x | ICE_TEST_BUS_REG: 0x%08x\n",
+			reg, val);
+	}
+}
+
+static void qcom_ice_debug(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	if (!pdev) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		goto out;
+	}
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev) {
+		pr_err("%s: No ICE device available\n", __func__);
+		goto out;
+	}
+
+	if (!ice_dev->is_ice_enabled) {
+		pr_err("%s: ICE device is not enabled\n", __func__);
+		goto out;
+	}
+
+	pr_err("%s: =========== REGISTER DUMP (%p)===========\n",
+			ice_dev->ice_instance_type, ice_dev);
+
+	pr_err("%s: ICE Control: 0x%08x | ICE Reset: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_CONTROL),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_RESET));
+
+	pr_err("%s: ICE Version: 0x%08x | ICE FUSE:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_VERSION),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_FUSE_SETTING));
+
+	pr_err("%s: ICE Param1: 0x%08x | ICE Param2:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_2));
+
+	pr_err("%s: ICE Param3: 0x%08x | ICE Param4:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_4));
+
+	pr_err("%s: ICE Param5: 0x%08x | ICE IRQ STTS:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_PARAMETERS_5),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_STTS));
+
+	pr_err("%s: ICE IRQ MASK: 0x%08x | ICE IRQ CLR:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_MASK),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_NON_SEC_IRQ_CLR));
+
+	if (ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) {
+		pr_err("%s: ICE INVALID CCFG ERR STTS: 0x%08x\n",
+			ice_dev->ice_instance_type,
+			qcom_ice_readl(ice_dev,
+				QCOM_ICE_INVALID_CCFG_ERR_STTS));
+	}
+
+	if ((ICE_REV(ice_dev->ice_hw_version, MAJOR) > 2) ||
+		((ICE_REV(ice_dev->ice_hw_version, MAJOR) == 2) &&
+		 (ICE_REV(ice_dev->ice_hw_version, MINOR) >= 1))) {
+		pr_err("%s: ICE BIST Sts: 0x%08x | ICE Bypass Sts:  0x%08x\n",
+			ice_dev->ice_instance_type,
+			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BIST_STATUS),
+			qcom_ice_readl(ice_dev, QCOM_ICE_REGS_BYPASS_STATUS));
+	}
+
+	pr_err("%s: ICE ADV CTRL: 0x%08x | ICE ENDIAN SWAP:  0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ADVANCED_CONTROL),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_ENDIAN_SWAP));
+
+	pr_err("%s: ICE_STM1_ERR_SYND1: 0x%08x | ICE_STM1_ERR_SYND2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM2_ERR_SYND1: 0x%08x | ICE_STM2_ERR_SYND2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2));
+
+	pr_err("%s: ICE_STM1_COUNTER1: 0x%08x | ICE_STM1_COUNTER2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS2));
+
+	pr_err("%s: ICE_STM1_COUNTER3: 0x%08x | ICE_STM1_COUNTER4: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS4));
+
+	pr_err("%s: ICE_STM2_COUNTER1: 0x%08x | ICE_STM2_COUNTER2: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS1),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS2));
+
+	pr_err("%s: ICE_STM2_COUNTER3: 0x%08x | ICE_STM2_COUNTER4: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS3),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS4));
+
+	pr_err("%s: ICE_STM1_CTR5_MSB: 0x%08x | ICE_STM1_CTR5_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM1_CTR6_MSB: 0x%08x | ICE_STM1_CTR6_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM1_CTR7_MSB: 0x%08x | ICE_STM1_CTR7_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM1_CTR8_MSB: 0x%08x | ICE_STM1_CTR8_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM1_CTR9_MSB: 0x%08x | ICE_STM1_CTR9_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB));
+
+	pr_err("%s: ICE_STM2_CTR5_MSB: 0x%08x | ICE_STM2_CTR5_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB));
+
+	pr_err("%s: ICE_STM2_CTR6_MSB: 0x%08x | ICE_STM2_CTR6_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB));
+
+	pr_err("%s: ICE_STM2_CTR7_MSB: 0x%08x | ICE_STM2_CTR7_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB));
+
+	pr_err("%s: ICE_STM2_CTR8_MSB: 0x%08x | ICE_STM2_CTR8_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB));
+
+	pr_err("%s: ICE_STM2_CTR9_MSB: 0x%08x | ICE_STM2_CTR9_LSB: 0x%08x\n",
+		ice_dev->ice_instance_type,
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB),
+		qcom_ice_readl(ice_dev, QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB));
+
+	qcom_ice_dump_test_bus(ice_dev);
+	pr_err("%s: ICE reset start time: %llu ICE reset done time: %llu\n",
+			ice_dev->ice_instance_type,
+		(unsigned long long)ice_dev->ice_reset_start_time.tv64,
+		(unsigned long long)ice_dev->ice_reset_complete_time.tv64);
+
+	if (ktime_to_us(ktime_sub(ice_dev->ice_reset_complete_time,
+				  ice_dev->ice_reset_start_time)) > 0)
+		pr_err("%s: Time taken for reset: %lu\n",
+			ice_dev->ice_instance_type,
+			(unsigned long)ktime_to_us(ktime_sub(
+					ice_dev->ice_reset_complete_time,
+					ice_dev->ice_reset_start_time)));
+out:
+	return;
+}
+
+static int qcom_ice_reset(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+
+	ice_dev = platform_get_drvdata(pdev);
+	if (!ice_dev) {
+		pr_err("%s: INVALID ice_dev\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev->ice_reset_start_time = ktime_get();
+
+	return qcom_ice_finish_power_collapse(ice_dev);
+}
+
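+/*
+ * Decide the ICE setting for one block request. In outline: default to
+ * bypass in both directions; ask the per-file-encryption layer (pfk)
+ * whether this bio carries a PFE key; otherwise, for dm based requests
+ * (marked with BIO_INLINECRYPT), pull the crypto setting out of the dm
+ * map_info; anything else stays in bypass.
+ */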
+static int qcom_ice_config_start(struct platform_device *pdev,
+		struct request *req,
+		struct ice_data_setting *setting, bool async)
+{
+	struct ice_crypto_setting *crypto_data;
+	struct ice_crypto_setting pfk_crypto_data = {0};
+	union map_info *info;
+	int ret = 0;
+	bool is_pfe = false;
+
+	if (!pdev || !req || !setting) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * It is not an error to have a request with no bio; such requests
+	 * must bypass ICE. So first set bypass and then return if no bio is
+	 * present in the request (setting was already validated above).
+	 */
+	setting->encr_bypass = true;
+	setting->decr_bypass = true;
+
+	if (!req->bio) {
+		/* It is not an error to have a request with no bio */
+		return 0;
+	}
+
+	ret = pfk_load_key_start(req->bio, &pfk_crypto_data, &is_pfe, async);
+	if (is_pfe) {
+		if (ret) {
+			if (ret != -EBUSY && ret != -EAGAIN)
+				pr_err("%s error %d while configuring ice key for PFE\n",
+						__func__, ret);
+			return ret;
+		}
+
+		return qti_ice_setting_config(req, pdev,
+				&pfk_crypto_data, setting);
+	}
+
+	/*
+	 * The info field in req->end_io_data may be used by multiple dm or
+	 * non-dm entities. To ensure that we operate only on dm based
+	 * requests, check the BIO_INLINECRYPT flag.
+	 */
+	if (bio_flagged(req->bio, BIO_INLINECRYPT)) {
+		info = dm_get_rq_mapinfo(req);
+		if (!info) {
+			pr_debug("%s info not available in request\n",
+				 __func__);
+			return 0;
+		}
+
+		crypto_data = (struct ice_crypto_setting *)info->ptr;
+		if (!crypto_data) {
+			pr_err("%s crypto_data not available in request\n",
+				 __func__);
+			return -EINVAL;
+		}
+
+		return qti_ice_setting_config(req, pdev,
+				crypto_data, setting);
+	}
+
+	/*
+	 * This is not an error. If the target is not req-crypt based, all
+	 * requests from the storage driver come here to check whether any
+	 * ICE setting is required.
+	 */
+	return 0;
+}
+EXPORT_SYMBOL(qcom_ice_config_start);
+
+static int qcom_ice_config_end(struct request *req)
+{
+	int ret = 0;
+	bool is_pfe = false;
+
+	if (!req) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!req->bio) {
+		/* It is not an error to have a request with no  bio */
+		return 0;
+	}
+
+	ret = pfk_load_key_end(req->bio, &is_pfe);
+	if (is_pfe) {
+		if (ret != 0)
+			pr_err("%s: error %d while ending ice key configuration for PFE\n",
+								__func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL(qcom_ice_config_end);
+
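+/*
+ * Report whether a non-secure ICE interrupt is pending, based on the
+ * TEST_BUS_REG snapshot: returns 1 if the bit is set, 0 if clear, or a
+ * negative errno when the device is absent or not enabled.
+ */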
+static int qcom_ice_status(struct platform_device *pdev)
+{
+	struct ice_device *ice_dev;
+	unsigned int test_bus_reg_status;
+
+	if (!pdev) {
+		pr_err("%s: Invalid params passed\n", __func__);
+		return -EINVAL;
+	}
+
+	ice_dev = platform_get_drvdata(pdev);
+
+	if (!ice_dev)
+		return -ENODEV;
+
+	if (!ice_dev->is_ice_enabled)
+		return -ENODEV;
+
+	test_bus_reg_status = qcom_ice_readl(ice_dev,
+					QCOM_ICE_REGS_TEST_BUS_REG);
+
+	return !!(test_bus_reg_status & QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR);
+}
+
+struct qcom_ice_variant_ops qcom_ice_ops = {
+	.name             = "qcom",
+	.init             = qcom_ice_init,
+	.reset            = qcom_ice_reset,
+	.resume           = qcom_ice_resume,
+	.suspend          = qcom_ice_suspend,
+	.config_start     = qcom_ice_config_start,
+	.config_end       = qcom_ice_config_end,
+	.status           = qcom_ice_status,
+	.debug            = qcom_ice_debug,
+};
+
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
+{
+	struct platform_device *ice_pdev = NULL;
+	struct ice_device *ice_dev = NULL;
+
+	if (!node) {
+		pr_err("%s: invalid node %p", __func__, node);
+		goto out;
+	}
+
+	if (!of_device_is_available(node)) {
+		pr_err("%s: device unavailable\n", __func__);
+		goto out;
+	}
+
+	if (list_empty(&ice_devices)) {
+		pr_err("%s: invalid device list\n", __func__);
+		ice_pdev = ERR_PTR(-EPROBE_DEFER);
+		goto out;
+	}
+
+	list_for_each_entry(ice_dev, &ice_devices, list) {
+		if (ice_dev->pdev->of_node == node) {
+			pr_info("%s: found ice device %p\n", __func__, ice_dev);
+			break;
+		}
+	}
+
+	ice_pdev = to_platform_device(ice_dev->pdev);
+	pr_info("%s: matching platform device %p\n", __func__, ice_pdev);
+out:
+	return ice_pdev;
+}
+
+static struct ice_device *get_ice_device_from_storage_type
+					(const char *storage_type)
+{
+	struct ice_device *ice_dev = NULL;
+
+	if (list_empty(&ice_devices)) {
+		pr_err("%s: invalid device list\n", __func__);
+		ice_dev = ERR_PTR(-EPROBE_DEFER);
+		goto out;
+	}
+
+	list_for_each_entry(ice_dev, &ice_devices, list) {
+		if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
+			pr_info("%s: found ice device %p\n", __func__, ice_dev);
+			break;
+		}
+	}
+out:
+	return ice_dev;
+}
+
+static int enable_ice_setup(struct ice_device *ice_dev)
+{
+	int ret = -1, vote;
+
+	/* Setup Regulator */
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev)) {
+			pr_err("%s: Could not get regulator\n", __func__);
+			goto out;
+		}
+		ret = regulator_enable(ice_dev->reg);
+		if (ret) {
+			pr_err("%s:%p: Could not enable regulator\n",
+					__func__, ice_dev);
+			goto out;
+		}
+	}
+
+	/* Setup Clocks */
+	ret = qcom_ice_enable_clocks(ice_dev, true);
+	if (ret) {
+		pr_err("%s:%pK:%s Could not enable clocks\n", __func__,
+				ice_dev, ice_dev->ice_instance_type);
+		goto out_reg;
+	}
+
+	/* Setup Bus Vote */
+	vote = qcom_ice_get_bus_vote(ice_dev, "MAX");
+	if (vote < 0) {
+		ret = vote;
+		goto out_clocks;
+	}
+
+	ret = qcom_ice_set_bus_vote(ice_dev, vote);
+	if (ret) {
+		pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
+		goto out_clocks;
+	}
+
+	return ret;
+
+out_clocks:
+	qcom_ice_enable_clocks(ice_dev, false);
+out_reg:
+	/* Cleanup failures are logged but must not mask the original error */
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev))
+			pr_err("%s: Could not get regulator\n", __func__);
+		else if (regulator_disable(ice_dev->reg))
+			pr_err("%s:%pK: Could not disable regulator\n",
+					__func__, ice_dev);
+	}
+out:
+	return ret;
+}
+
+static int disable_ice_setup(struct ice_device *ice_dev)
+{
+	int ret = -1, vote;
+
+	/* Drop the bus vote to the minimum */
+	vote = qcom_ice_get_bus_vote(ice_dev, "MIN");
+	if (vote < 0) {
+		pr_err("%s:%pK: Unable to get bus vote\n", __func__, ice_dev);
+		goto out_disable_clocks;
+	}
+
+	ret = qcom_ice_set_bus_vote(ice_dev, vote);
+	if (ret)
+		pr_err("%s:%pK: failed %d\n", __func__, ice_dev, ret);
+
+out_disable_clocks:
+	/* Disable Clocks */
+	if (qcom_ice_enable_clocks(ice_dev, false))
+		pr_err("%s:%pK:%s Could not disable clocks\n", __func__,
+				ice_dev, ice_dev->ice_instance_type);
+
+	/* Disable Regulator */
+	if (ice_dev->is_regulator_available) {
+		if (qcom_ice_get_vreg(ice_dev)) {
+			pr_err("%s: Could not get regulator\n", __func__);
+			goto out;
+		}
+		ret = regulator_disable(ice_dev->reg);
+		if (ret) {
+			pr_err("%s:%p: Could not disable regulator\n",
+					__func__, ice_dev);
+			goto out;
+		}
+	}
+out:
+	return ret;
+}
+
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	int ret = -ENODEV;
+	struct ice_device *ice_dev = NULL;
+
+	ice_dev = get_ice_device_from_storage_type(storage_type);
+	if (ice_dev == ERR_PTR(-EPROBE_DEFER))
+		return -EPROBE_DEFER;
+
+	if (!ice_dev)
+		return ret;
+
+	if (enable)
+		return enable_ice_setup(ice_dev);
+	else
+		return disable_ice_setup(ice_dev);
+}
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node)
+{
+	return &qcom_ice_ops;
+}
+EXPORT_SYMBOL(qcom_ice_get_variant_ops);
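+
+/*
+ * Illustrative consumer flow (sketch only; the host-side variable names
+ * are hypothetical, the ICE calls are the ones defined in this file):
+ *
+ *	struct qcom_ice_variant_ops *vops;
+ *	struct platform_device *ice_pdev;
+ *
+ *	ice_pdev = qcom_ice_get_pdevice(ice_node);
+ *	vops = qcom_ice_get_variant_ops(ice_node);
+ *	err = vops->init(ice_pdev, host, host_ice_error_cb);
+ *	...
+ *	vops->config_start(ice_pdev, req, &setting, true);
+ */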
+
+/* Match table used to bind this driver to devices described in DT */
+static const struct of_device_id qcom_ice_match[] = {
+	{ .compatible = "qcom,ice" },
+	{},
+};
+MODULE_DEVICE_TABLE(of, qcom_ice_match);
+
+static struct platform_driver qcom_ice_driver = {
+	.probe          = qcom_ice_probe,
+	.remove         = qcom_ice_remove,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcom_ice",
+		.of_match_table = qcom_ice_match,
+	},
+};
+module_platform_driver(qcom_ice_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Inline Crypto Engine driver");
diff --git a/drivers/crypto/msm/iceregs.h b/drivers/crypto/msm/iceregs.h
new file mode 100644
index 0000000..4b63e7a
--- /dev/null
+++ b/drivers/crypto/msm/iceregs.h
@@ -0,0 +1,159 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_
+
+/* Register bits for ICE version */
+#define ICE_CORE_CURRENT_MAJOR_VERSION 0x03
+
+#define ICE_CORE_STEP_REV_MASK		0xFFFF
+#define ICE_CORE_STEP_REV		0 /* bit 15-0 */
+#define ICE_CORE_MAJOR_REV_MASK		0xFF000000
+#define ICE_CORE_MAJOR_REV		24 /* bit 31-24 */
+#define ICE_CORE_MINOR_REV_MASK		0xFF0000
+#define ICE_CORE_MINOR_REV		16 /* bit 23-16 */
+
+#define ICE_BIST_STATUS_MASK		(0xF0000000)	/* bits 28-31 */
+
+#define ICE_FUSE_SETTING_MASK			0x1
+#define ICE_FORCE_HW_KEY0_SETTING_MASK		0x2
+#define ICE_FORCE_HW_KEY1_SETTING_MASK		0x4
+
+/* QCOM ICE Registers from SWI */
+#define QCOM_ICE_REGS_CONTROL			0x0000
+#define QCOM_ICE_REGS_RESET			0x0004
+#define QCOM_ICE_REGS_VERSION			0x0008
+#define QCOM_ICE_REGS_FUSE_SETTING		0x0010
+#define QCOM_ICE_REGS_PARAMETERS_1		0x0014
+#define QCOM_ICE_REGS_PARAMETERS_2		0x0018
+#define QCOM_ICE_REGS_PARAMETERS_3		0x001C
+#define QCOM_ICE_REGS_PARAMETERS_4		0x0020
+#define QCOM_ICE_REGS_PARAMETERS_5		0x0024
+
+
+/* QCOM ICE v3.X only */
+#define QCOM_ICE_GENERAL_ERR_STTS		0x0040
+#define QCOM_ICE_INVALID_CCFG_ERR_STTS		0x0030
+#define QCOM_ICE_GENERAL_ERR_MASK		0x0044
+
+
+/* QCOM ICE v2.X only */
+#define QCOM_ICE_REGS_NON_SEC_IRQ_STTS		0x0040
+#define QCOM_ICE_REGS_NON_SEC_IRQ_MASK		0x0044
+
+
+#define QCOM_ICE_REGS_NON_SEC_IRQ_CLR		0x0048
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME1	0x0050
+#define QCOM_ICE_REGS_STREAM1_ERROR_SYNDROME2	0x0054
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME1	0x0058
+#define QCOM_ICE_REGS_STREAM2_ERROR_SYNDROME2	0x005C
+#define QCOM_ICE_REGS_STREAM1_BIST_ERROR_VEC	0x0060
+#define QCOM_ICE_REGS_STREAM2_BIST_ERROR_VEC	0x0064
+#define QCOM_ICE_REGS_STREAM1_BIST_FINISH_VEC	0x0068
+#define QCOM_ICE_REGS_STREAM2_BIST_FINISH_VEC	0x006C
+#define QCOM_ICE_REGS_BIST_STATUS		0x0070
+#define QCOM_ICE_REGS_BYPASS_STATUS		0x0074
+#define QCOM_ICE_REGS_ADVANCED_CONTROL		0x1000
+#define QCOM_ICE_REGS_ENDIAN_SWAP		0x1004
+#define QCOM_ICE_REGS_TEST_BUS_CONTROL		0x1010
+#define QCOM_ICE_REGS_TEST_BUS_REG		0x1014
+#define QCOM_ICE_REGS_STREAM1_COUNTERS1		0x1100
+#define QCOM_ICE_REGS_STREAM1_COUNTERS2		0x1104
+#define QCOM_ICE_REGS_STREAM1_COUNTERS3		0x1108
+#define QCOM_ICE_REGS_STREAM1_COUNTERS4		0x110C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_MSB	0x1110
+#define QCOM_ICE_REGS_STREAM1_COUNTERS5_LSB	0x1114
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_MSB	0x1118
+#define QCOM_ICE_REGS_STREAM1_COUNTERS6_LSB	0x111C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_MSB	0x1120
+#define QCOM_ICE_REGS_STREAM1_COUNTERS7_LSB	0x1124
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_MSB	0x1128
+#define QCOM_ICE_REGS_STREAM1_COUNTERS8_LSB	0x112C
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_MSB	0x1130
+#define QCOM_ICE_REGS_STREAM1_COUNTERS9_LSB	0x1134
+#define QCOM_ICE_REGS_STREAM2_COUNTERS1		0x1200
+#define QCOM_ICE_REGS_STREAM2_COUNTERS2		0x1204
+#define QCOM_ICE_REGS_STREAM2_COUNTERS3		0x1208
+#define QCOM_ICE_REGS_STREAM2_COUNTERS4		0x120C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_MSB	0x1210
+#define QCOM_ICE_REGS_STREAM2_COUNTERS5_LSB	0x1214
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_MSB	0x1218
+#define QCOM_ICE_REGS_STREAM2_COUNTERS6_LSB	0x121C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_MSB	0x1220
+#define QCOM_ICE_REGS_STREAM2_COUNTERS7_LSB	0x1224
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_MSB	0x1228
+#define QCOM_ICE_REGS_STREAM2_COUNTERS8_LSB	0x122C
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_MSB	0x1230
+#define QCOM_ICE_REGS_STREAM2_COUNTERS9_LSB	0x1234
+
+#define QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE		(1L << 0)
+#define QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE		(1L << 1)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_LBO		(1L << 2)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_LBO		(1L << 3)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUN		(1L << 4)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUN		(1L << 5)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DUS		(1L << 6)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DUS		(1L << 7)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_DBO		(1L << 8)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_DBO		(1L << 9)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL		(1L << 10)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL		(1L << 11)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX		(1L << 12)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX		(1L << 13)
+#define QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS		(1L << 14)
+#define QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS		(1L << 15)
+
+#define QCOM_ICE_NON_SEC_IRQ_MASK				\
+			(QCOM_ICE_STREAM1_PREMATURE_LBA_CHANGE |\
+			 QCOM_ICE_STREAM2_PREMATURE_LBA_CHANGE |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_LBO |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_LBO |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_DUN |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_DUN |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_DUS |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_DUS |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_DBO |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_DBO |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_ENC_SEL |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_ENC_SEL |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_CONF_IDX |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_CONF_IDX |\
+			 QCOM_ICE_STREAM1_NOT_EXPECTED_NEW_TRNS |\
+			 QCOM_ICE_STREAM2_NOT_EXPECTED_NEW_TRNS)
+
+/* QCOM ICE registers from secure side */
+#define QCOM_ICE_TEST_BUS_REG_SECURE_INTR            (1L << 28)
+#define QCOM_ICE_TEST_BUS_REG_NON_SECURE_INTR        (1L << 2)
+
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_STTS	0x2050
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_MASK	0x2054
+#define QCOM_ICE_LUT_KEYS_ICE_SEC_IRQ_CLR	0x2058
+
+#define QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED		(1L << 0)
+#define QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED		(1L << 1)
+#define QCOM_ICE_QCOMC_DBG_OPEN_EVENT			(1L << 30)
+#define QCOM_ICE_KEYS_RAM_RESET_COMPLETED		(1L << 31)
+
+#define QCOM_ICE_SEC_IRQ_MASK					  \
+			(QCOM_ICE_STREAM1_PARTIALLY_SET_KEY_USED |\
+			 QCOM_ICE_STREAM2_PARTIALLY_SET_KEY_USED |\
+			 QCOM_ICE_QCOMC_DBG_OPEN_EVENT |	  \
+			 QCOM_ICE_KEYS_RAM_RESET_COMPLETED)
+
+
+#define qcom_ice_writel(ice, val, reg)	\
+	writel_relaxed((val), (ice)->mmio + (reg))
+#define qcom_ice_readl(ice, reg)	\
+	readl_relaxed((ice)->mmio + (reg))
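+
+/*
+ * These accessors are deliberately *_relaxed and provide no ordering
+ * guarantees; callers that need ordering against other accesses insert
+ * explicit barriers (see the mb() calls in ice.c).
+ */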
+
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_REGS_H_ */
diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c
new file mode 100644
index 0000000..3a2a51d
--- /dev/null
+++ b/drivers/crypto/msm/ota_crypto.c
@@ -0,0 +1,974 @@
+/* Copyright (c) 2010-2014,2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* QTI Over the Air (OTA) Crypto driver */
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/cache.h>
+
+
+#include <linux/qcota.h>
+#include "qce.h"
+#include "qce_ota.h"
+
+enum qce_ota_oper_enum {
+	QCE_OTA_F8_OPER   = 0,
+	QCE_OTA_MPKT_F8_OPER = 1,
+	QCE_OTA_F9_OPER  = 2,
+	QCE_OTA_VAR_MPKT_F8_OPER = 3,
+	QCE_OTA_OPER_LAST
+};
+
+struct ota_dev_control;
+
+struct ota_async_req {
+	struct list_head rlist;
+	struct completion complete;
+	int err;
+	enum qce_ota_oper_enum op;
+	union {
+		struct qce_f9_req f9_req;
+		struct qce_f8_req f8_req;
+		struct qce_f8_multi_pkt_req f8_mp_req;
+		struct qce_f8_varible_multi_pkt_req f8_v_mp_req;
+	} req;
+	unsigned int steps;
+	struct ota_qce_dev  *pqce;
+};
+
+/*
+ * Register ourselves as a misc device to be able to access the OTA
+ * crypto engine from userspace.
+ */
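+/*
+ * Illustrative userspace usage (sketch only; error handling omitted and
+ * the request fields shown are assumptions based on struct qce_f9_req):
+ *
+ *	int fd = open("/dev/qcota0", O_RDWR);
+ *	struct qce_f9_req req = { .message = buf, .msize = len, ... };
+ *	ioctl(fd, QCOTA_F9_REQ, &req);	// req.mac_i holds the MAC on return
+ */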
+
+
+#define QCOTA_DEV	"qcota"
+
+
+struct ota_dev_control {
+
+	/* misc device */
+	struct miscdevice miscdevice;
+	struct list_head ready_commands;
+	unsigned int magic;
+	struct list_head qce_dev;
+	spinlock_t lock;
+	struct mutex register_lock;
+	bool registered;
+	uint32_t total_units;
+};
+
+struct ota_qce_dev {
+	struct list_head qlist;
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	struct ota_async_req *active_command;
+	struct tasklet_struct done_tasklet;
+	struct ota_dev_control *podev;
+	uint32_t unit;
+	u64 total_req;
+	u64 err_req;
+};
+
+#define OTA_MAGIC 0x4f544143
+
+static long qcota_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg);
+static int qcota_open(struct inode *inode, struct file *file);
+static int qcota_release(struct inode *inode, struct file *file);
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq);
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, int ret);
+
+static const struct file_operations qcota_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcota_ioctl,
+	.open = qcota_open,
+	.release = qcota_release,
+};
+
+static struct ota_dev_control qcota_dev = {
+	.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qcota0",
+			.fops = &qcota_fops,
+	},
+	.magic = OTA_MAGIC,
+};
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcota_stat {
+	u64 f8_req;
+	u64 f8_mp_req;
+	u64 f8_v_mp_req;
+	u64 f9_req;
+	u64 f8_op_success;
+	u64 f8_op_fail;
+	u64 f8_mp_op_success;
+	u64 f8_mp_op_fail;
+	u64 f8_v_mp_op_success;
+	u64 f8_v_mp_op_fail;
+	u64 f9_op_success;
+	u64 f9_op_fail;
+};
+static struct qcota_stat _qcota_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcota;
+
+static struct ota_dev_control *qcota_control(void)
+{
+	return &qcota_dev;
+}
+
+static int qcota_open(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev = qcota_control();
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+				MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	file->private_data = podev;
+
+	return 0;
+}
+
+static int qcota_release(struct inode *inode, struct file *file)
+{
+	struct ota_dev_control *podev;
+
+	podev = file->private_data;
+
+	if (podev != NULL && podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+	}
+
+	file->private_data = NULL;
+
+	return 0;
+}
+
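+/*
+ * Advance a variable multi-packet F8 request to its next packet. All
+ * packets live back to back in one kernel buffer, each aligned to
+ * L1_CACHE_BYTES (see the QCOTA_F8_V_MPKT_REQ handler below), so stepping
+ * means moving data_in/data_out past the previous packet, picking up the
+ * next iovec size and bumping count_c. Returns false when the request is
+ * done or has failed.
+ */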
+static bool _next_v_mp_req(struct ota_async_req *areq)
+{
+	unsigned char *p;
+
+	if (areq->err)
+		return false;
+	if (++areq->steps >= areq->req.f8_v_mp_req.num_pkt)
+		return false;
+
+	p = areq->req.f8_v_mp_req.qce_f8_req.data_in;
+	p += areq->req.f8_v_mp_req.qce_f8_req.data_len;
+	p = (uint8_t *) ALIGN(((uintptr_t)p), L1_CACHE_BYTES);
+
+	areq->req.f8_v_mp_req.qce_f8_req.data_out = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_in = p;
+	areq->req.f8_v_mp_req.qce_f8_req.data_len =
+		areq->req.f8_v_mp_req.cipher_iov[areq->steps].size;
+
+	areq->req.f8_v_mp_req.qce_f8_req.count_c++;
+	return true;
+}
+
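+/*
+ * Tasklet handler run after each QCE completion. It either kicks off the
+ * next sub-packet of a variable multi-packet request, or pulls the next
+ * queued command off ready_commands and starts it; the completion of the
+ * finished request is signalled last, outside the lock.
+ */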
+static void req_done(unsigned long data)
+{
+	struct ota_qce_dev *pqce = (struct ota_qce_dev *)data;
+	struct ota_dev_control *podev = pqce->podev;
+	struct ota_async_req *areq;
+	unsigned long flags;
+	struct ota_async_req *new_req = NULL;
+	int ret = 0;
+	bool schedule = true;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = pqce->active_command;
+	if (unlikely(areq == NULL))
+		pr_err("ota_crypto: req_done, no active request\n");
+	else if (areq->op == QCE_OTA_VAR_MPKT_F8_OPER) {
+		if (_next_v_mp_req(areq)) {
+			/* execute next subcommand */
+			spin_unlock_irqrestore(&podev->lock, flags);
+			ret = start_req(pqce, areq);
+			if (unlikely(ret)) {
+				areq->err = ret;
+				schedule = true;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				areq = NULL;
+				schedule = false;
+			}
+		} else {
+			/* done with this variable mp req */
+			schedule = true;
+		}
+	}
+	while (schedule) {
+		if (!list_empty(&podev->ready_commands)) {
+			new_req = container_of(podev->ready_commands.next,
+						struct ota_async_req, rlist);
+			list_del(&new_req->rlist);
+			pqce->active_command = new_req;
+			spin_unlock_irqrestore(&podev->lock, flags);
+
+			new_req->err = 0;
+			/* start a new request */
+			ret = start_req(pqce, new_req);
+			if (unlikely(new_req && ret)) {
+				new_req->err = ret;
+				complete(&new_req->complete);
+				ret = 0;
+				new_req = NULL;
+				spin_lock_irqsave(&podev->lock, flags);
+			} else {
+				schedule = false;
+			}
+		} else {
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+			schedule = false;
+		}
+	}
+	if (areq)
+		complete(&areq->complete);
+}
+
+static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+	areq->req.f9_req.mac_i  = *((uint32_t *)icv);
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else
+		areq->err = 0;
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv,
+	int ret)
+{
+	struct ota_async_req *areq = (struct ota_async_req *) cookie;
+	struct ota_qce_dev *pqce;
+
+	pqce = areq->pqce;
+
+	if (ret) {
+		pqce->err_req++;
+		areq->err = -ENXIO;
+	} else {
+		areq->err = 0;
+	}
+
+	tasklet_schedule(&pqce->done_tasklet);
+}
+
+static int start_req(struct ota_qce_dev *pqce, struct ota_async_req *areq)
+{
+	struct qce_f9_req *pf9;
+	struct qce_f8_multi_pkt_req *p_mp_f8;
+	struct qce_f8_req *pf8;
+	int ret = 0;
+
+	/* command should be on the podev->active_command */
+	areq->pqce = pqce;
+
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		pf8 = &areq->req.f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+	case QCE_OTA_MPKT_F8_OPER:
+		p_mp_f8 = &areq->req.f8_mp_req;
+		ret = qce_f8_multi_pkt_req(pqce->qce, p_mp_f8, areq, f8_cb);
+		break;
+
+	case QCE_OTA_F9_OPER:
+		pf9 = &areq->req.f9_req;
+		ret =  qce_f9_req(pqce->qce, pf9, areq, f9_cb);
+		break;
+
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+		pf8 = &areq->req.f8_v_mp_req.qce_f8_req;
+		ret = qce_f8_req(pqce->qce, pf8, areq, f8_cb);
+		break;
+
+	default:
+		ret = -ENOTSUPP;
+		break;
+	}
+	areq->err = ret;
+	pqce->total_req++;
+	if (ret)
+		pqce->err_req++;
+	return ret;
+}
+
+static struct ota_qce_dev *schedule_qce(struct ota_dev_control *podev)
+{
+	/* do this function with spinlock set */
+	struct ota_qce_dev *p;
+
+	if (unlikely(list_empty(&podev->qce_dev))) {
+		pr_err("%s: no valid qce to schedule\n", __func__);
+		return NULL;
+	}
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		if (p->active_command == NULL)
+			return p;
+	}
+	return NULL;
+}
+
+static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev)
+{
+	unsigned long flags;
+	int ret = 0;
+	struct qcota_stat *pstat;
+	struct ota_qce_dev *pqce;
+
+	areq->err = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	pqce = schedule_qce(podev);
+	if (pqce) {
+		pqce->active_command = areq;
+		spin_unlock_irqrestore(&podev->lock, flags);
+
+		ret = start_req(pqce, areq);
+		if (ret != 0) {
+			spin_lock_irqsave(&podev->lock, flags);
+			pqce->active_command = NULL;
+			spin_unlock_irqrestore(&podev->lock, flags);
+		}
+
+	} else {
+		list_add_tail(&areq->rlist, &podev->ready_commands);
+		spin_unlock_irqrestore(&podev->lock, flags);
+	}
+
+	if (ret == 0)
+		wait_for_completion(&areq->complete);
+
+	pstat = &_qcota_stat;
+	switch (areq->op) {
+	case QCE_OTA_F8_OPER:
+		if (areq->err)
+			pstat->f8_op_fail++;
+		else
+			pstat->f8_op_success++;
+		break;
+
+	case QCE_OTA_MPKT_F8_OPER:
+
+		if (areq->err)
+			pstat->f8_mp_op_fail++;
+		else
+			pstat->f8_mp_op_success++;
+		break;
+
+	case QCE_OTA_F9_OPER:
+		if (areq->err)
+			pstat->f9_op_fail++;
+		else
+			pstat->f9_op_success++;
+		break;
+	case QCE_OTA_VAR_MPKT_F8_OPER:
+	default:
+		if (areq->err)
+			pstat->f8_v_mp_op_fail++;
+		else
+			pstat->f8_v_mp_op_success++;
+		break;
+	}
+
+	return areq->err;
+}
+
+static long qcota_ioctl(struct file *file,
+			  unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct ota_dev_control *podev;
+	uint8_t *user_src;
+	uint8_t *user_dst;
+	uint8_t *k_buf = NULL;
+	struct ota_async_req areq;
+	uint32_t total, temp;
+	struct qcota_stat *pstat;
+	int i;
+	uint8_t *p = NULL;
+
+	podev = file->private_data;
+	if (podev == NULL || podev->magic != OTA_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&areq.complete);
+
+	pstat = &_qcota_stat;
+
+	switch (cmd) {
+	case QCOTA_F9_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f9_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f9_req, (void __user *)arg,
+				     sizeof(struct qce_f9_req)))
+			return -EFAULT;
+
+		user_src = areq.req.f9_req.message;
+		if (!access_ok(VERIFY_READ, (void __user *)user_src,
+			       areq.req.f9_req.msize))
+			return -EFAULT;
+
+		if (areq.req.f9_req.msize == 0)
+			return 0;
+		k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		if (__copy_from_user(k_buf, (void __user *)user_src,
+				areq.req.f9_req.msize)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		areq.req.f9_req.message = k_buf;
+		areq.op = QCE_OTA_F9_OPER;
+
+		pstat->f9_req++;
+		err = submit_req(&areq, podev);
+
+		areq.req.f9_req.message = user_src;
+		if (err == 0 && __copy_to_user((void __user *)arg,
+				&areq.req.f9_req, sizeof(struct qce_f9_req))) {
+			err = -EFAULT;
+		}
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_req, (void __user *)arg,
+				     sizeof(struct qce_f8_req)))
+			return -EFAULT;
+		total = areq.req.f8_req.data_len;
+		user_src = areq.req.f8_req.data_in;
+		if (user_src != NULL) {
+			if (!access_ok(VERIFY_READ, (void __user *)
+					user_src, total))
+				return -EFAULT;
+		}
+
+		user_dst = areq.req.f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (user_src && __copy_from_user(k_buf,
+				(void __user *)user_src, total)) {
+			kfree(k_buf);
+			return -EFAULT;
+		}
+
+		if (user_src)
+			areq.req.f8_req.data_in = k_buf;
+		else
+			areq.req.f8_req.data_in = NULL;
+		areq.req.f8_req.data_out = k_buf;
+
+		areq.op = QCE_OTA_F8_OPER;
+
+		pstat->f8_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+
+		break;
+
+	case QCOTA_F8_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+			       sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg,
+				     sizeof(struct qce_f8_multi_pkt_req)))
+			return -EFAULT;
+		temp = areq.req.f8_mp_req.qce_f8_req.data_len;
+		if (temp < (uint32_t) areq.req.f8_mp_req.cipher_start +
+				 areq.req.f8_mp_req.cipher_size)
+			return -EINVAL;
+		total = (uint32_t) areq.req.f8_mp_req.num_pkt *
+				areq.req.f8_mp_req.qce_f8_req.data_len;
+
+		user_src = areq.req.f8_mp_req.qce_f8_req.data_in;
+		if (!access_ok(VERIFY_READ, (void __user *)
+				user_src, total))
+			return -EFAULT;
+
+		user_dst = areq.req.f8_mp_req.qce_f8_req.data_out;
+		if (!access_ok(VERIFY_WRITE, (void __user *)
+				user_dst, total))
+			return -EFAULT;
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+		/* k_buf returned from kmalloc should be cache line aligned */
+		if (__copy_from_user(k_buf, (void __user *)user_src, total)) {
+			kfree(k_buf);
+
+			return -EFAULT;
+		}
+
+		areq.req.f8_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_mp_req.qce_f8_req.data_in = k_buf;
+
+		areq.op = QCE_OTA_MPKT_F8_OPER;
+
+		pstat->f8_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err == 0 && __copy_to_user(user_dst, k_buf, total))
+			err = -EFAULT;
+		kfree(k_buf);
+		break;
+
+	case QCOTA_F8_V_MPKT_REQ:
+		if (!access_ok(VERIFY_WRITE, (void __user *)arg,
+				sizeof(struct qce_f8_varible_multi_pkt_req)))
+			return -EFAULT;
+		if (__copy_from_user(&areq.req.f8_v_mp_req, (void __user *)arg,
+				sizeof(struct qce_f8_varible_multi_pkt_req)))
+			return -EFAULT;
+
+		if (areq.req.f8_v_mp_req.num_pkt > MAX_NUM_V_MULTI_PKT)
+			return -EINVAL;
+
+		for (i = 0, total = 0; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			if (!access_ok(VERIFY_WRITE, (void __user *)
+				areq.req.f8_v_mp_req.cipher_iov[i].addr,
+				areq.req.f8_v_mp_req.cipher_iov[i].size))
+				return -EFAULT;
+			total += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			total = ALIGN(total, L1_CACHE_BYTES);
+		}
+
+		if (!total)
+			return 0;
+		k_buf = kmalloc(total, GFP_KERNEL);
+		if (k_buf == NULL)
+			return -ENOMEM;
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_src =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (__copy_from_user(p, (void __user *)user_src,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+
+		areq.req.f8_v_mp_req.qce_f8_req.data_out = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_in = k_buf;
+		areq.req.f8_v_mp_req.qce_f8_req.data_len =
+			areq.req.f8_v_mp_req.cipher_iov[0].size;
+		areq.steps = 0;
+		areq.op = QCE_OTA_VAR_MPKT_F8_OPER;
+
+		pstat->f8_v_mp_req++;
+		err = submit_req(&areq, podev);
+
+		if (err != 0) {
+			kfree(k_buf);
+			return err;
+		}
+
+		for (i = 0, p = k_buf; i < areq.req.f8_v_mp_req.num_pkt; i++) {
+			user_dst =  areq.req.f8_v_mp_req.cipher_iov[i].addr;
+			if (__copy_to_user(user_dst, p,
+				areq.req.f8_v_mp_req.cipher_iov[i].size)) {
+				kfree(k_buf);
+				return -EFAULT;
+			}
+			p += areq.req.f8_v_mp_req.cipher_iov[i].size;
+			p = (uint8_t *) ALIGN(((uintptr_t)p),
+							L1_CACHE_BYTES);
+		}
+		kfree(k_buf);
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
+static int qcota_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct ota_dev_control *podev;
+	struct ce_hw_support ce_support;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	podev = &qcota_dev;
+	pqce = kzalloc(sizeof(*pqce), GFP_KERNEL);
+	if (!pqce) {
+		pr_err("qcota_probe: Memory allocation FAIL\n");
+		return -ENOMEM;
+	}
+
+	pqce->podev = podev;
+	pqce->active_command = NULL;
+	tasklet_init(&pqce->done_tasklet, req_done, (unsigned long)pqce);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		pr_err("%s: device %s, can not open qce\n",
+			__func__, pdev->name);
+		goto err;
+	}
+	if (qce_hw_support(handle, &ce_support) < 0 ||
+					ce_support.ota == false) {
+		pr_err("%s: device %s, qce does not support ota capability\n",
+			__func__, pdev->name);
+		rc = -ENODEV;
+		goto err;
+	}
+	pqce->qce = handle;
+	pqce->pdev = pdev;
+	pqce->total_req = 0;
+	pqce->err_req = 0;
+	platform_set_drvdata(pdev, pqce);
+
+	mutex_lock(&podev->register_lock);
+	rc = 0;
+	if (podev->registered == false) {
+		rc = misc_register(&podev->miscdevice);
+		if (rc == 0) {
+			pqce->unit = podev->total_units;
+			podev->total_units++;
+			podev->registered = true;
+		}
+	} else {
+		pqce->unit = podev->total_units;
+		podev->total_units++;
+	}
+	mutex_unlock(&podev->register_lock);
+	if (rc) {
+		pr_err("ion: failed to register misc device.\n");
+		goto err;
+	}
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_add_tail(&pqce->qlist, &podev->qce_dev);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return 0;
+err:
+	if (handle)
+		qce_close(handle);
+
+	platform_set_drvdata(pdev, NULL);
+	tasklet_kill(&pqce->done_tasklet);
+	kfree(pqce);
+	return rc;
+}
+
+static int qcota_remove(struct platform_device *pdev)
+{
+	struct ota_dev_control *podev;
+	struct ota_qce_dev *pqce;
+	unsigned long flags;
+
+	pqce = platform_get_drvdata(pdev);
+	if (!pqce)
+		return 0;
+	if (pqce->qce)
+		qce_close(pqce->qce);
+
+	podev = pqce->podev;
+	if (!podev)
+		goto ret;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	list_del(&pqce->qlist);
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	mutex_lock(&podev->register_lock);
+	if (--podev->total_units == 0) {
+		if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+			misc_deregister(&podev->miscdevice);
+		podev->registered = false;
+	}
+	mutex_unlock(&podev->register_lock);
+ret:
+
+	tasklet_kill(&pqce->done_tasklet);
+	kfree(pqce);
+	return 0;
+}
+
+static const struct of_device_id qcota_match[] = {
+	{	.compatible = "qcom,qcota",
+	},
+	{}
+};
+
+static struct platform_driver qcota_plat_driver = {
+	.probe = qcota_probe,
+	.remove = qcota_remove,
+	.driver = {
+		.name = "qcota",
+		.owner = THIS_MODULE,
+		.of_match_table = qcota_match,
+	},
+};
+
+static int _disp_stats(void)
+{
+	struct qcota_stat *pstat;
+	int len = 0;
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	pstat = &_qcota_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI OTA crypto accelerator Statistics:\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 request                      : %llu\n",
+					pstat->f8_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation success            : %llu\n",
+					pstat->f8_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 operation fail               : %llu\n",
+					pstat->f8_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP request                   : %llu\n",
+					pstat->f8_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation success         : %llu\n",
+					pstat->f8_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 MP operation fail            : %llu\n",
+					pstat->f8_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP request          : %llu\n",
+					pstat->f8_v_mp_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation success: %llu\n",
+					pstat->f8_v_mp_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F8 Variable MP operation fail   : %llu\n",
+					pstat->f8_v_mp_op_fail);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 request                      : %llu\n",
+					pstat->f9_req);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation success            : %llu\n",
+					pstat->f9_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   F9 operation fail               : %llu\n",
+					pstat->f9_op_fail);
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req                 : %llu\n",
+			p->unit,
+			p->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error           : %llu\n",
+			p->unit,
+			p->err_req
+		);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int len;
+
+	len = _disp_stats();
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	struct ota_dev_control *podev = &qcota_dev;
+	unsigned long flags;
+	struct ota_qce_dev *p;
+
+	memset((char *)&_qcota_stat, 0, sizeof(struct qcota_stat));
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	list_for_each_entry(p, &podev->qce_dev, qlist) {
+		p->total_req = 0;
+		p->err_req = 0;
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcota_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcota", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcota debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-0");
+	_debug_qcota = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcota, &_debug_stats_ops);
+	if (dent == NULL) {
+		/* debugfs_create_file() returns NULL on failure here, so
+		 * PTR_ERR(dent) would be 0; use a real error code instead.
+		 */
+		pr_err("qcota debugfs_create_file fail\n");
+		rc = -ENOMEM;
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init qcota_init(void)
+{
+	int rc;
+	struct ota_dev_control *podev;
+
+	rc = _qcota_debug_init();
+	if (rc)
+		return rc;
+
+	podev = &qcota_dev;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	INIT_LIST_HEAD(&podev->qce_dev);
+	spin_lock_init(&podev->lock);
+	mutex_init(&podev->register_lock);
+	podev->registered = false;
+	podev->total_units = 0;
+
+	return platform_driver_register(&qcota_plat_driver);
+}
+static void __exit qcota_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcota_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Ota Crypto driver");
+
+module_init(qcota_init);
+module_exit(qcota_exit);
diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h
new file mode 100644
index 0000000..7b4ca24
--- /dev/null
+++ b/drivers/crypto/msm/qce.h
@@ -0,0 +1,191 @@
+/*
+ * QTI Crypto Engine driver API
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+
+#ifndef __CRYPTO_MSM_QCE_H
+#define __CRYPTO_MSM_QCE_H
+
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/crypto.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aes.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+
+/* SHA digest size  in bytes */
+#define SHA256_DIGESTSIZE		32
+#define SHA1_DIGESTSIZE			20
+
+#define AES_CE_BLOCK_SIZE		16
+
+/* key size in bytes */
+#define HMAC_KEY_SIZE			(SHA1_DIGESTSIZE)    /* hmac-sha1 */
+#define SHA_HMAC_KEY_SIZE		64
+#define DES_KEY_SIZE			8
+#define TRIPLE_DES_KEY_SIZE		24
+#define AES128_KEY_SIZE			16
+#define AES192_KEY_SIZE			24
+#define AES256_KEY_SIZE			32
+#define MAX_CIPHER_KEY_SIZE		AES256_KEY_SIZE
+
+/* iv length in bytes */
+#define AES_IV_LENGTH			16
+#define DES_IV_LENGTH                   8
+#define MAX_IV_LENGTH			AES_IV_LENGTH
+
+/* Maximum number of bytes per transfer */
+#define QCE_MAX_OPER_DATA		0xFF00
+
+/* Maximum Nonce bytes  */
+#define MAX_NONCE  16
+
+typedef void (*qce_comp_func_ptr_t)(void *areq,
+		unsigned char *icv, unsigned char *iv, int ret);
+
+/* Cipher algorithms supported */
+enum qce_cipher_alg_enum {
+	CIPHER_ALG_DES = 0,
+	CIPHER_ALG_3DES = 1,
+	CIPHER_ALG_AES = 2,
+	CIPHER_ALG_LAST
+};
+
+/* Hash and hmac algorithms supported */
+enum qce_hash_alg_enum {
+	QCE_HASH_SHA1   = 0,
+	QCE_HASH_SHA256 = 1,
+	QCE_HASH_SHA1_HMAC   = 2,
+	QCE_HASH_SHA256_HMAC = 3,
+	QCE_HASH_AES_CMAC = 4,
+	QCE_HASH_LAST
+};
+
+/* Cipher encryption/decryption operations */
+enum qce_cipher_dir_enum {
+	QCE_ENCRYPT = 0,
+	QCE_DECRYPT = 1,
+	QCE_CIPHER_DIR_LAST
+};
+
+/* Cipher algorithms modes */
+enum qce_cipher_mode_enum {
+	QCE_MODE_CBC = 0,
+	QCE_MODE_ECB = 1,
+	QCE_MODE_CTR = 2,
+	QCE_MODE_XTS = 3,
+	QCE_MODE_CCM = 4,
+	QCE_CIPHER_MODE_LAST
+};
+
+/* Cipher operation type */
+enum qce_req_op_enum {
+	QCE_REQ_ABLK_CIPHER = 0,
+	QCE_REQ_ABLK_CIPHER_NO_KEY = 1,
+	QCE_REQ_AEAD = 2,
+	QCE_REQ_LAST
+};
+
+/* Algorithms/features supported in CE HW engine */
+struct ce_hw_support {
+	bool sha1_hmac_20; /* supports 20-byte HMAC key */
+	bool sha1_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha256_hmac; /* supports max HMAC key of 64 bytes */
+	bool sha_hmac; /* supports SHA1 and SHA256 max HMAC key of 64 bytes */
+	bool cmac;
+	bool aes_key_192;
+	bool aes_xts;
+	bool aes_ccm;
+	bool ota;
+	bool aligned_only;
+	bool bam;
+	bool is_shared;
+	bool hw_key;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	bool clk_mgmt_sus_res;
+	unsigned int ce_device;
+	unsigned int ce_hw_instance;
+	unsigned int max_request;
+};
+
+/* Sha operation parameters */
+struct qce_sha_req {
+	qce_comp_func_ptr_t qce_cb;	/* callback */
+	enum qce_hash_alg_enum alg;	/* sha algorithm */
+	unsigned char *digest;		/* sha digest  */
+	struct scatterlist *src;	/* pointer to scatter list entry */
+	uint32_t  auth_data[4];		/* byte count */
+	unsigned char *authkey;		/* auth key */
+	unsigned int  authklen;		/* auth key length */
+	bool first_blk;			/* first block indicator */
+	bool last_blk;			/* last block indicator */
+	unsigned int size;		/* data length in bytes */
+	void *areq;
+	unsigned int  flags;
+};
+
+struct qce_req {
+	enum qce_req_op_enum op;	/* operation type */
+	qce_comp_func_ptr_t qce_cb;	/* callback */
+	void *areq;
+	enum qce_cipher_alg_enum   alg;	/* cipher algorithms*/
+	enum qce_cipher_dir_enum dir;	/* encryption? decryption? */
+	enum qce_cipher_mode_enum mode;	/* algorithm mode  */
+	enum qce_hash_alg_enum auth_alg;/* authentication algorithm for aead */
+	unsigned char *authkey;		/* authentication key  */
+	unsigned int authklen;		/* authentication key length */
+	unsigned int authsize;		/* authentication tag (ICV) length */
+	unsigned char  nonce[MAX_NONCE];/* nonce for ccm mode */
+	unsigned char *assoc;		/* Ptr to formatted associated data */
+	unsigned int assoclen;		/* Formatted associated data length  */
+	struct scatterlist *asg;	/* Formatted associated data sg  */
+	unsigned char *enckey;		/* cipher key  */
+	unsigned int encklen;		/* cipher key length */
+	unsigned char *iv;		/* initialization vector */
+	unsigned int ivsize;		/* initialization vector size*/
+	unsigned int cryptlen;		/* data length */
+	unsigned int use_pmem;		/* is source of data PMEM allocated? */
+	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure*/
+	unsigned int  flags;
+};
+
+struct qce_pm_table {
+	int (*suspend)(void *handle);
+	int (*resume)(void *handle);
+};
+
+extern struct qce_pm_table qce_pm_table;
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+int qce_enable_clk(void *handle);
+int qce_disable_clk(void *handle);
+void qce_get_driver_stats(void *handle);
+void qce_clear_driver_stats(void *handle);
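+
+/*
+ * Illustrative call flow (a minimal sketch added for clarity, not code
+ * taken from the driver; the platform_device pointer and the request
+ * set-up are assumed to come from the caller's probe path):
+ *
+ *	int rc;
+ *	struct ce_hw_support support;
+ *	void *handle = qce_open(pdev, &rc);
+ *
+ *	if (handle == NULL)
+ *		return rc;
+ *	qce_hw_support(handle, &support);
+ *	rc = qce_process_sha_req(handle, &sreq);
+ *	...
+ *	qce_close(handle);
+ *
+ * qce_hw_support() reports the engine features; completion of the sha
+ * request is signalled through sreq.qce_cb.
+ */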
+
+#endif /* __CRYPTO_MSM_QCE_H */
diff --git a/drivers/crypto/msm/qce50.c b/drivers/crypto/msm/qce50.c
new file mode 100644
index 0000000..0cf4386
--- /dev/null
+++ b/drivers/crypto/msm/qce50.c
@@ -0,0 +1,6141 @@
+/*
+ * QTI Crypto Engine driver.
+ *
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "QCE50: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/io.h>
+#include <linux/platform_device.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/crypto.h>
+#include <linux/bitops.h>
+#include <linux/clk/qcom.h>
+#include <linux/qcrypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <soc/qcom/socinfo.h>
+
+#include "qce.h"
+#include "qce50.h"
+#include "qcryptohw_50.h"
+#include "qce_ota.h"
+
+#define CRYPTO_CONFIG_RESET 0xE01EF
+#define MAX_SPS_DESC_FIFO_SIZE 0xfff0
+#define QCE_MAX_NUM_DSCR    0x200
+#define QCE_SECTOR_SIZE	    0x200
+#define CE_CLK_100MHZ	100000000
+#define CE_CLK_DIV	1000000
+
+#define CRYPTO_CORE_MAJOR_VER_NUM 0x05
+#define CRYPTO_CORE_MINOR_VER_NUM 0x03
+#define CRYPTO_CORE_STEP_VER_NUM 0x1
+
+#define CRYPTO_REQ_USER_PAT 0xdead0000
+
+static DEFINE_MUTEX(bam_register_lock);
+static DEFINE_MUTEX(qce_iomap_mutex);
+
+struct bam_registration_info {
+	struct list_head qlist;
+	unsigned long handle;
+	uint32_t cnt;
+	uint32_t bam_mem;
+	void __iomem *bam_iobase;
+	bool support_cmd_dscr;
+};
+static LIST_HEAD(qce50_bam_list);
+
+/* Used to determine the mode */
+#define MAX_BUNCH_MODE_REQ 2
+/* Max number of requests supported */
+#define MAX_QCE_BAM_REQ 8
+/* An interrupt is requested on every SET_INTR_AT_REQ-th request */
+#define SET_INTR_AT_REQ			(MAX_QCE_BAM_REQ / 2)
+/* To create extra request space to hold dummy request */
+#define MAX_QCE_BAM_REQ_WITH_DUMMY_REQ	(MAX_QCE_BAM_REQ + 1)
+/* Allocate the memory for MAX_QCE_BAM_REQ  + 1 (for dummy request) */
+#define MAX_QCE_ALLOC_BAM_REQ		MAX_QCE_BAM_REQ_WITH_DUMMY_REQ
+/* QCE driver modes */
+#define IN_INTERRUPT_MODE 0
+#define IN_BUNCH_MODE 1
+/* Dummy request data length */
+#define DUMMY_REQ_DATA_LEN 64
+/* Delay, in jiffies, before the bunch-mode timer expires */
+#define DELAY_IN_JIFFIES 5
+/* Index of the dummy request */
+#define DUMMY_REQ_INDEX			MAX_QCE_BAM_REQ
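+/*
+ * Example with the values above: MAX_QCE_BAM_REQ = 8 gives
+ * SET_INTR_AT_REQ = 4, so in bunch mode only every 4th queued request
+ * is submitted with an interrupt flag, and slot 8 (DUMMY_REQ_INDEX) is
+ * reserved for the internal dummy request.
+ */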
+
+#define TOTAL_IOVEC_SPACE_PER_PIPE (QCE_MAX_NUM_DSCR * sizeof(struct sps_iovec))
+
+enum qce_owner {
+	QCE_OWNER_NONE   = 0,
+	QCE_OWNER_CLIENT = 1,
+	QCE_OWNER_TIMEOUT = 2
+};
+
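+/*
+ * Pre-allocated hash request submitted internally (see qce_dummy_req())
+ * so that results can be flushed when the bunch-mode timer expires
+ * without waiting for a client request. (Summary inferred from the
+ * timer and cadence constants above.)
+ */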
+struct dummy_request {
+	struct qce_sha_req sreq;
+	struct scatterlist sg;
+	struct ahash_request areq;
+};
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at one time. It is up to
+ * the sw above to ensure single threading of operation on an engine.
+ */
+struct qce_device {
+	struct device *pdev;        /* Handle to platform_device structure */
+	struct bam_registration_info *pbam;
+
+	unsigned char *coh_vmem;    /* Allocated coherent virtual memory */
+	dma_addr_t coh_pmem;	    /* Allocated coherent physical memory */
+	int memsize;				/* Memory allocated */
+	unsigned char *iovec_vmem;  /* Allocated iovec virtual memory */
+	int iovec_memsize;		/* iovec memory allocated */
+	uint32_t bam_mem;		/* bam physical address, from DT */
+	uint32_t bam_mem_size;		/* bam io size, from DT */
+	int is_shared;			/* CE HW is shared */
+	bool support_cmd_dscr;
+	bool support_hw_key;
+	bool support_clk_mgmt_sus_res;
+	bool support_only_core_src_clk;
+
+	void __iomem *iobase;	    /* Virtual io base of CE HW  */
+	unsigned int phy_iobase;    /* Physical io base of CE HW    */
+
+	struct clk *ce_core_src_clk;	/* Handle to CE src clk*/
+	struct clk *ce_core_clk;	/* Handle to CE clk */
+	struct clk *ce_clk;		/* Handle to CE clk */
+	struct clk *ce_bus_clk;	/* Handle to CE AXI clk*/
+	bool no_get_around;
+	bool no_ccm_mac_status_get_around;
+	unsigned int ce_opp_freq_hz;
+	bool use_sw_aes_cbc_ecb_ctr_algo;
+	bool use_sw_aead_algo;
+	bool use_sw_aes_xts_algo;
+	bool use_sw_ahash_algo;
+	bool use_sw_hmac_algo;
+	bool use_sw_aes_ccm_algo;
+	uint32_t engines_avail;
+	struct qce_ce_cfg_reg_setting reg;
+	struct ce_bam_info ce_bam_info;
+	struct ce_request_info ce_request_info[MAX_QCE_ALLOC_BAM_REQ];
+	unsigned int ce_request_index;
+	enum qce_owner owner;
+	atomic_t no_of_queued_req;
+	struct timer_list timer;
+	struct dummy_request dummyreq;
+	unsigned int mode;
+	unsigned int intr_cadence;
+	unsigned int dev_no;
+	struct qce_driver_stats qce_stats;
+	atomic_t bunch_cmd_seq;
+	atomic_t last_intr_seq;
+	bool cadence_flag;
+	uint8_t *dummyreq_in_buf;
+};
+
+static void print_notify_debug(struct sps_event_notify *notify);
+static void _sps_producer_callback(struct sps_event_notify *notify);
+static int qce_dummy_req(struct qce_device *pce_dev);
+
+static int _qce50_disp_stats;
+
+/* Standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint32_t  _std_init_vector_sha1[] =   {
+	0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0
+};
+
+/* Standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint32_t _std_init_vector_sha256[] = {
+	0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A,
+	0x510E527F, 0x9B05688C,	0x1F83D9AB, 0x5BE0CD19
+};
+
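+/*
+ * Pack a byte stream into big-endian (network order) 32-bit words,
+ * zero-padding a trailing partial word; for example, the bytes
+ * {0x01, 0x02, 0x03} produce the single word 0x01020300.
+ */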
+static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n;
+
+	n = len  / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
+static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int i, j;
+	unsigned char swap_iv[AES_IV_LENGTH];
+
+	memset(swap_iv, 0, AES_IV_LENGTH);
+	for (i = (AES_IV_LENGTH-len), j = len-1;  i < AES_IV_LENGTH; i++, j--)
+		swap_iv[i] = b[j];
+	_byte_stream_to_net_words(iv, swap_iv, AES_IV_LENGTH);
+}
+
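+/* Count the scatterlist entries needed to cover nbytes of data. */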
+static int count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+	return i;
+}
+
+static int qce_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+	enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_map_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
+
+static int qce_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+	int nents, enum dma_data_direction direction)
+{
+	int i;
+
+	for (i = 0; i < nents; ++i) {
+		dma_unmap_sg(dev, sg, 1, direction);
+		sg = sg_next(sg);
+	}
+
+	return nents;
+}
+
+static int _probe_ce_engine(struct qce_device *pce_dev)
+{
+	unsigned int rev;
+	unsigned int maj_rev, min_rev, step_rev;
+
+	rev = readl_relaxed(pce_dev->iobase + CRYPTO_VERSION_REG);
+	/*
+	 * Ensure the version register read above has completed
+	 * before the value is used below.
+	 */
+	mb();
+	maj_rev = (rev & CRYPTO_CORE_MAJOR_REV_MASK) >> CRYPTO_CORE_MAJOR_REV;
+	min_rev = (rev & CRYPTO_CORE_MINOR_REV_MASK) >> CRYPTO_CORE_MINOR_REV;
+	step_rev = (rev & CRYPTO_CORE_STEP_REV_MASK) >> CRYPTO_CORE_STEP_REV;
+
+	if (maj_rev != CRYPTO_CORE_MAJOR_VER_NUM) {
+		pr_err("Unsupported QTI crypto device at 0x%x, rev %d.%d.%d\n",
+			pce_dev->phy_iobase, maj_rev, min_rev, step_rev);
+		return -EIO;
+	}
+
+	/*
+	 * The majority of crypto HW bugs have been fixed in 5.3.0 and
+	 * above. That allows a single sps transfer of consumer
+	 * pipe, and a single sps transfer of producer pipe
+	 * for a crypto request. no_get_around flag indicates this.
+	 *
+	 * In 5.3.1, the CCM MAC_FAILED in result dump issue is
+	 * fixed. no_ccm_mac_status_get_around flag indicates this.
+	 */
+	pce_dev->no_get_around = (min_rev >= CRYPTO_CORE_MINOR_VER_NUM);
+	if (min_rev > CRYPTO_CORE_MINOR_VER_NUM)
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else if ((min_rev == CRYPTO_CORE_MINOR_VER_NUM) &&
+			 (step_rev >= CRYPTO_CORE_STEP_VER_NUM))
+		pce_dev->no_ccm_mac_status_get_around = true;
+	else
+		pce_dev->no_ccm_mac_status_get_around = false;
+
+	pce_dev->ce_bam_info.minor_version = min_rev;
+
+	pce_dev->engines_avail = readl_relaxed(pce_dev->iobase +
+					CRYPTO_ENGINES_AVAIL);
+	dev_info(pce_dev->pdev, "QTI Crypto %d.%d.%d device found @0x%x\n",
+			maj_rev, min_rev, step_rev, pce_dev->phy_iobase);
+
+	pce_dev->ce_bam_info.ce_burst_size = MAX_CE_BAM_BURST_SIZE;
+
+	dev_info(pce_dev->pdev, "CE device = 0x%x\n, IO base, CE = 0x%p\n, Consumer (IN) PIPE %d,    Producer (OUT) PIPE %d\n IO base BAM = 0x%p\n BAM IRQ %d\n Engines Availability = 0x%x\n",
+			pce_dev->ce_bam_info.ce_device, pce_dev->iobase,
+			pce_dev->ce_bam_info.dest_pipe_index,
+			pce_dev->ce_bam_info.src_pipe_index,
+			pce_dev->ce_bam_info.bam_iobase,
+			pce_dev->ce_bam_info.bam_irq, pce_dev->engines_avail);
+	return 0;
+}
+
+static struct qce_cmdlist_info *_ce_get_hash_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_sha_req *sreq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		return &cmdlistptr->auth_sha1;
+	case QCE_HASH_SHA256:
+		return &cmdlistptr->auth_sha256;
+	case QCE_HASH_SHA1_HMAC:
+		return &cmdlistptr->auth_sha1_hmac;
+	case QCE_HASH_SHA256_HMAC:
+		return &cmdlistptr->auth_sha256_hmac;
+	case QCE_HASH_AES_CMAC:
+		if (sreq->authklen == AES128_KEY_SIZE)
+			return &cmdlistptr->auth_aes_128_cmac;
+		return &cmdlistptr->auth_aes_256_cmac;
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+static int _ce_setup_hash(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq,
+				struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	bool sha1 = false;
+	struct sps_command_element *pce = NULL;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	uint32_t auth_cfg;
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+		/* null key is no longer checked here; rely on the flags */
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY)
+						== QCRYPTO_CTX_USE_HW_KEY)
+			use_hw_key = true;
+		else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+		pce = cmdlistinfo->go_proc;
+		if (use_hw_key == true) {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+							pce_dev->phy_iobase);
+		} else {
+			pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+							pce_dev->phy_iobase);
+			pce = cmdlistinfo->auth_key;
+			if (use_pipe_key == false) {
+				_byte_stream_to_net_words(mackey32,
+						sreq->authkey,
+						sreq->authklen);
+				for (i = 0; i < authk_size_in_word; i++, pce++)
+					pce->data = mackey32[i];
+			}
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+	case QCE_HASH_SHA1_HMAC:
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+	case QCE_HASH_SHA256_HMAC:
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < 5; i++, pce++)
+		pce->data = auth32[i];
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++, pce++)
+			pce->data = auth32[i];
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = sreq->auth_data[i];
+
+	/* set/reset last/first and key-source bits in auth seg cfg */
+	pce = cmdlistinfo->auth_seg_cfg;
+	auth_cfg = pce->data & ~(1 << CRYPTO_LAST |
+				1 << CRYPTO_FIRST |
+				1 << CRYPTO_USE_PIPE_KEY_AUTH |
+				1 << CRYPTO_USE_HW_KEY_AUTH);
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+	pce->data = auth_cfg;
+go_proc:
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = sreq->size;
+
+	pce = cmdlistinfo->encr_seg_cfg;
+	pce->data = 0;
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+
+	/*
+	 * always ensure there is input data; zero-length transfers (ZLT)
+	 * do not work with BAM-NDP
+	 */
+	if (sreq->size)
+		pce->data = sreq->size;
+	else
+		pce->data = pce_dev->ce_bam_info.ce_burst_size;
+
+	return 0;
+}
+
+static struct qce_cmdlist_info *_ce_get_aead_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+				return &cmdlistptr->aead_hmac_sha1_cbc_3des;
+			else if (creq->auth_alg == QCE_HASH_SHA256_HMAC)
+				return &cmdlistptr->aead_hmac_sha256_cbc_3des;
+			else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+	case CIPHER_ALG_AES:
+		switch (creq->mode) {
+		case QCE_MODE_CBC:
+			if (creq->encklen ==  AES128_KEY_SIZE) {
+				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128;
+				else if (creq->auth_alg ==
+						QCE_HASH_SHA256_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128;
+				else
+					return NULL;
+			} else if (creq->encklen ==  AES256_KEY_SIZE) {
+				if (creq->auth_alg == QCE_HASH_SHA1_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256;
+				else if (creq->auth_alg ==
+						QCE_HASH_SHA256_HMAC)
+					return &cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256;
+				else
+					return NULL;
+			} else
+				return NULL;
+			break;
+		default:
+			return NULL;
+		}
+		break;
+
+	default:
+		return NULL;
+	}
+	return NULL;
+}
+
+static int _ce_setup_aead(struct qce_device *pce_dev, struct qce_req *q_req,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	struct sps_command_element *pce;
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = q_req->ivsize;
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	switch (q_req->alg) {
+	case CIPHER_ALG_DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_3DES:
+		enciv_in_word = 2;
+		break;
+	case CIPHER_ALG_AES:
+		if ((key_size != AES128_KEY_SIZE) &&
+				(key_size != AES256_KEY_SIZE))
+			return -EINVAL;
+		enciv_in_word = 4;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* only support cbc mode */
+	if (q_req->mode != QCE_MODE_CBC)
+		return -EINVAL;
+
+	_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+	pce = cmdlistinfo->encr_cntr_iv;
+	for (i = 0; i < enciv_in_word; i++, pce++)
+		pce->data = enciv32[i];
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < enck_size_in_word; i++, pce++)
+		pce->data = enckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	encr_cfg = pce->data;
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	else
+		encr_cfg &= ~(1 << CRYPTO_ENCODE);
+	pce->data = encr_cfg;
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	pce = cmdlistinfo->auth_key;
+	for (i = 0; i < authk_size_in_word; i++, pce++)
+		pce->data = mackey32[i];
+	pce = cmdlistinfo->auth_iv;
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		for (i = 0; i < 5; i++, pce++)
+			pce->data = _std_init_vector_sha1[i];
+	else
+		for (i = 0; i < 8; i++, pce++)
+			pce->data = _std_init_vector_sha256[i];
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	pce = cmdlistinfo->auth_bytecount;
+	for (i = 0; i < 2; i++, pce++)
+		pce->data = 0;
+
+	pce = cmdlistinfo->auth_seg_cfg;
+	a_cfg = pce->data;
+	a_cfg &= ~(CRYPTO_AUTH_POS_MASK);
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+	pce->data = a_cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = totallen_in;
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = q_req->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	return 0;
+}
+
+static struct qce_cmdlist_info *_ce_get_cipher_cmdlistinfo(
+			struct qce_device *pce_dev,
+			int req_info, struct qce_req *creq)
+{
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	cmdlistptr = &pce_sps_data->cmdlistptr;
+	if (creq->alg != CIPHER_ALG_AES) {
+		switch (creq->alg) {
+		case CIPHER_ALG_DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_des_ecb;
+			return &cmdlistptr->cipher_des_cbc;
+		case CIPHER_ALG_3DES:
+			if (creq->mode == QCE_MODE_ECB)
+				return &cmdlistptr->cipher_3des_ecb;
+			return &cmdlistptr->cipher_3des_cbc;
+		default:
+			return NULL;
+		}
+	} else {
+		switch (creq->mode) {
+		case QCE_MODE_ECB:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_ecb;
+			return &cmdlistptr->cipher_aes_256_ecb;
+		case QCE_MODE_CBC:
+		case QCE_MODE_CTR:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_cbc_ctr;
+			return &cmdlistptr->cipher_aes_256_cbc_ctr;
+		case QCE_MODE_XTS:
+			if (creq->encklen/2 == AES128_KEY_SIZE)
+				return &cmdlistptr->cipher_aes_128_xts;
+			return &cmdlistptr->cipher_aes_256_xts;
+		case QCE_MODE_CCM:
+			if (creq->encklen == AES128_KEY_SIZE)
+				return &cmdlistptr->aead_aes_128_ccm;
+			return &cmdlistptr->aead_aes_256_ccm;
+		default:
+			return NULL;
+		}
+	}
+	return NULL;
+}
+
+static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq,
+		uint32_t totallen_in, uint32_t coffset,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+	struct sps_command_element *pce = NULL;
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	pce = cmdlistinfo->go_proc;
+	if (use_hw_key == true)
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_QC_KEY_REG +
+						pce_dev->phy_iobase);
+	else
+		pce->addr = (uint32_t)(CRYPTO_GOPROC_REG +
+						pce_dev->phy_iobase);
+	if ((use_pipe_key == false) && (use_hw_key == false)) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		pce = cmdlistinfo->auth_nonce_info;
+		for (i = 0; i < noncelen32; i++, pce++)
+			pce->data = nonce32[i];
+
+		if (creq->authklen == AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else if (creq->authklen == AES256_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key == true)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			pce = cmdlistinfo->auth_key;
+			for (i = 0; i < authklen32; i++, pce++)
+				pce->data = enckey32[i];
+		}
+
+		pce = cmdlistinfo->auth_seg_cfg;
+		pce->data = auth_cfg;
+
+		pce = cmdlistinfo->auth_seg_size;
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data = totallen_in;
+		else
+			pce->data = totallen_in - creq->authsize;
+		pce = cmdlistinfo->auth_seg_start;
+		pce->data = 0;
+	} else {
+		if (creq->op != QCE_REQ_AEAD) {
+			pce = cmdlistinfo->auth_seg_cfg;
+			pce->data = 0;
+		}
+	}
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+				(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			pce = cmdlistinfo->encr_key;
+			pce->data = enckey32[0];
+			pce++;
+			pce->data = enckey32[1];
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			pce = cmdlistinfo->encr_cntr_iv;
+			pce->data = enciv32[0];
+			pce++;
+			pce->data = enciv32[1];
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			pce = cmdlistinfo->encr_key;
+			for (i = 0; i < 6; i++, pce++)
+				pce->data = enckey32[i];
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				pce = cmdlistinfo->encr_xts_key;
+				for (i = 0; i < xtsklen; i++, pce++)
+					pce->data = xtskey32[i];
+			}
+			/* write xts du size */
+			pce = cmdlistinfo->encr_xts_du_size;
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				pce->data = min((unsigned int)QCE_SECTOR_SIZE,
+						creq->cryptlen);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				pce->data =
+					min((unsigned int)QCE_SECTOR_SIZE * 2,
+					creq->cryptlen);
+				break;
+			default:
+				pce->data = creq->cryptlen;
+				break;
+			}
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+			/* write encr cntr iv */
+			pce = cmdlistinfo->encr_cntr_iv;
+			for (i = 0; i < 4; i++, pce++)
+				pce->data = enciv32[i];
+
+			if (creq->mode ==  QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				pce = cmdlistinfo->encr_ccm_cntr_iv;
+				for (i = 0; i < 4; i++, pce++)
+					pce->data = enciv32[i];
+				/* update cntr_iv[3] by one */
+				pce = cmdlistinfo->encr_cntr_iv;
+				pce += 3;
+				pce->data += 1;
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if (use_hw_key == false) {
+				/* write encr key */
+				pce = cmdlistinfo->encr_key;
+				for (i = 0; i < enck_size_in_word; i++, pce++)
+					pce->data = enckey32[i];
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg) */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if ((creq->alg == CIPHER_ALG_DES) || (creq->alg == CIPHER_ALG_3DES)) {
+		if (creq->dir == QCE_ENCRYPT)
+			pce->data |= (1 << CRYPTO_ENCODE);
+		else
+			pce->data &= ~(1 << CRYPTO_ENCODE);
+		encr_cfg = pce->data;
+	}  else	{
+		encr_cfg |=
+			((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	}
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	pce->data = encr_cfg;
+
+	/* write encr seg size */
+	pce = cmdlistinfo->encr_seg_size;
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT))
+		pce->data = (creq->cryptlen + creq->authsize);
+	else
+		pce->data = creq->cryptlen;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (coffset & 0xffff);
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = totallen_in;
+
+	return 0;
+}
+
+static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req *req,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->auth_iv;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ikey32[i];
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	pce->data = req->last_bits;
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	pce = cmdlistinfo->auth_bytecount;
+	pce->data = req->fresh;
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	pce++;
+	pce->data = req->count_i;
+
+	/* write auth seg cfg */
+	pce = cmdlistinfo->auth_seg_cfg;
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F9_DIRECTION);
+	pce->data = cfg;
+
+	/* write auth seg size */
+	pce = cmdlistinfo->auth_seg_size;
+	pce->data = req->msize;
+
+	/* write auth seg start */
+	pce = cmdlistinfo->auth_seg_start;
+	pce->data = 0;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->msize;
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+	return 0;
+}
+
+static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req,
+		bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset,
+		uint16_t cipher_size,
+		struct qce_cmdlist_info *cmdlistinfo)
+{
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t cfg;
+	struct sps_command_element *pce;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+	pce = cmdlistinfo->encr_key;
+	for (i = 0; i < key_size_in_word; i++, pce++)
+		pce->data = ckey32[i];
+
+	/* write encr seg cfg */
+	pce = cmdlistinfo->encr_seg_cfg;
+	if (key_stream_mode)
+		cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		cfg |= BIT(CRYPTO_F8_DIRECTION);
+	pce->data = cfg;
+
+	/* write encr seg start */
+	pce = cmdlistinfo->encr_seg_start;
+	pce->data = (cipher_offset & 0xffff);
+
+	/* write encr seg size  */
+	pce = cmdlistinfo->encr_seg_size;
+	pce->data = cipher_size;
+
+	/* write seg size  */
+	pce = cmdlistinfo->seg_size;
+	pce->data = req->data_len;
+
+	/* write cntr0_iv0 for countC */
+	pce = cmdlistinfo->encr_cntr_iv;
+	pce->data = req->count_c;
+	/* write cntr1_iv1 for nPkts, and bearer */
+	pce++;
+	if (npkts == 1)
+		npkts = 0;
+	pce->data = req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT;
+
+	/* write go */
+	pce = cmdlistinfo->go_proc;
+	pce->addr = (uint32_t)(CRYPTO_GOPROC_REG + pce_dev->phy_iobase);
+
+	return 0;
+}
+
+static void _qce_dump_descr_fifos(struct qce_device *pce_dev, int req_info)
+{
+	int i, j, ents;
+	struct ce_sps_data *pce_sps_data;
+	struct sps_iovec *iovec;
+	uint32_t cmd_flags = SPS_IOVEC_FLAG_CMD;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	iovec = pce_sps_data->in_transfer.iovec;
+	pr_info("==============================================\n");
+	pr_info("CONSUMER (TX/IN/DEST) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	for (i = 0; i <  pce_sps_data->in_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+					iovec->addr, iovec->size, iovec->flags);
+		if (iovec->flags & cmd_flags) {
+			struct sps_command_element *pced;
+
+			pced = (struct sps_command_element *)
+					(GET_VIRT_ADDR(iovec->addr));
+			ents = iovec->size/(sizeof(struct sps_command_element));
+			for (j = 0; j < ents; j++) {
+				pr_info("      [%d] [0x%x] 0x%x\n", j,
+					pced->addr, pced->data);
+				pced++;
+			}
+		}
+		iovec++;
+	}
+
+	pr_info("==============================================\n");
+	pr_info("PRODUCER (RX/OUT/SRC) PIPE DESCRIPTOR\n");
+	pr_info("==============================================\n");
+	iovec =  pce_sps_data->out_transfer.iovec;
+	for (i = 0; i <   pce_sps_data->out_transfer.iovec_count; i++) {
+		pr_info(" [%d] addr=0x%x  size=0x%x  flags=0x%x\n", i,
+				iovec->addr, iovec->size, iovec->flags);
+		iovec++;
+	}
+}
+
+#ifdef QCE_DEBUG
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+	_qce_dump_descr_fifos(pce_dev, req_info);
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+{									\
+	pr_info("      [0x%p] 0x%x\n", addr, (uint32_t)val);		\
+	writel_relaxed(val, addr);					\
+}
+
+#else
+
+static void _qce_dump_descr_fifos_dbg(struct qce_device *pce_dev, int req_info)
+{
+}
+
+#define QCE_WRITE_REG(val, addr)					\
+	writel_relaxed(val, addr)
+
+#endif
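+
+/*
+ * Note: QCE_WRITE_REG logs every register write when QCE_DEBUG is
+ * defined; otherwise it is a plain writel_relaxed() with no barrier,
+ * which is why the callers below issue mb() explicitly wherever
+ * ordering matters.
+ */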
+
+static int _ce_setup_hash_direct(struct qce_device *pce_dev,
+				struct qce_sha_req *sreq)
+{
+	uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)];
+	uint32_t diglen;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t authk_size_in_word = sreq->authklen/sizeof(uint32_t);
+	bool sha1 = false;
+	uint32_t auth_cfg = 0;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the write to the CONFIG register has completed before
+	 * any other configuration registers are written, so that the
+	 * endianness selected in the CONFIG register applies to them.
+	 */
+	mb();
+
+	if (sreq->alg == QCE_HASH_AES_CMAC) {
+		/* write auth seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+		/* write encr seg_cfg */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+		/* write encr seg_size */
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+		if (sreq->authklen == AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_128;
+		else
+			auth_cfg = pce_dev->reg.auth_cfg_cmac_256;
+	}
+
+	if ((sreq->alg == QCE_HASH_SHA1_HMAC) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC) ||
+			(sreq->alg ==  QCE_HASH_AES_CMAC)) {
+
+		_byte_stream_to_net_words(mackey32, sreq->authkey,
+						sreq->authklen);
+
+		/* null key is no longer checked here; rely on the flags */
+
+		if ((sreq->flags & QCRYPTO_CTX_USE_HW_KEY) ==
+					QCRYPTO_CTX_USE_HW_KEY) {
+			use_hw_key = true;
+		} else if ((sreq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+						QCRYPTO_CTX_USE_PIPE_KEY) {
+			use_pipe_key = true;
+		} else {
+			/* setup key */
+			for (i = 0; i < authk_size_in_word; i++)
+				QCE_WRITE_REG(mackey32[i], (pce_dev->iobase +
+					(CRYPTO_AUTH_KEY0_REG +
+							i*sizeof(uint32_t))));
+		}
+	}
+
+	if (sreq->alg ==  QCE_HASH_AES_CMAC)
+		goto go_proc;
+
+	/* if not the last, the size has to be on the block boundary */
+	if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE))
+		return -EIO;
+
+	switch (sreq->alg) {
+	case QCE_HASH_SHA1:
+		auth_cfg = pce_dev->reg.auth_cfg_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha1;
+		diglen = SHA1_DIGEST_SIZE;
+		sha1 = true;
+		break;
+	case QCE_HASH_SHA256:
+		auth_cfg = pce_dev->reg.auth_cfg_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		auth_cfg = pce_dev->reg.auth_cfg_hmac_sha256;
+		diglen = SHA256_DIGEST_SIZE;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */
+	if (sreq->first_blk) {
+		if (sha1) {
+			for (i = 0; i < 5; i++)
+				auth32[i] = _std_init_vector_sha1[i];
+		} else {
+			for (i = 0; i < 8; i++)
+				auth32[i] = _std_init_vector_sha256[i];
+		}
+	} else {
+		_byte_stream_to_net_words(auth32, sreq->digest, diglen);
+	}
+
+	/* Set auth_ivn, auth_keyn registers  */
+	for (i = 0; i < 5; i++)
+		QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	if ((sreq->alg == QCE_HASH_SHA256) ||
+			(sreq->alg == QCE_HASH_SHA256_HMAC)) {
+		for (i = 5; i < 8; i++)
+			QCE_WRITE_REG(auth32[i], (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+	}
+
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	for (i = 0; i < 2; i++)
+		QCE_WRITE_REG(sreq->auth_data[i], pce_dev->iobase +
+					CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+
+	/* Set/reset  last bit in CFG register  */
+	if (sreq->last_blk)
+		auth_cfg |= 1 << CRYPTO_LAST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_LAST);
+	if (sreq->first_blk)
+		auth_cfg |= 1 << CRYPTO_FIRST;
+	else
+		auth_cfg &= ~(1 << CRYPTO_FIRST);
+	if (use_hw_key)
+		auth_cfg |= 1 << CRYPTO_USE_HW_KEY_AUTH;
+	if (use_pipe_key)
+		auth_cfg |= 1 << CRYPTO_USE_PIPE_KEY_AUTH;
+go_proc:
+	/* write auth seg_cfg */
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* reset encr seg_cfg   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write seg_size   */
+	QCE_WRITE_REG(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	if (use_hw_key == false) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure the write to the GO register has completed before
+	 * issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_setup_aead_direct(struct qce_device *pce_dev,
+		struct qce_req *q_req, uint32_t totallen_in, uint32_t coffset)
+{
+	int32_t authk_size_in_word = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	int i;
+	uint32_t mackey32[SHA_HMAC_KEY_SIZE/sizeof(uint32_t)] = {0};
+	uint32_t a_cfg;
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE*2)/sizeof(uint32_t)] = {0};
+	uint32_t enciv32[MAX_IV_LENGTH/sizeof(uint32_t)] = {0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t enciv_in_word;
+	uint32_t key_size;
+	uint32_t ivsize = q_req->ivsize;
+	uint32_t encr_cfg;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the write to the CONFIG register has completed before
+	 * any other configuration registers are written, so that the
+	 * endianness selected in the CONFIG register applies to them.
+	 */
+	mb();
+
+	key_size = q_req->encklen;
+	enck_size_in_word = key_size/sizeof(uint32_t);
+
+	switch (q_req->alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+		break;
+
+	case CIPHER_ALG_3DES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+
+		switch (q_req->mode) {
+		case QCE_MODE_CBC:
+			if (key_size == AES128_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+			else if (key_size  == AES256_KEY_SIZE)
+				encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+			else
+				return -EINVAL;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	/* write CNTR0_IV0_REG */
+	if (q_req->mode !=  QCE_MODE_ECB) {
+		_byte_stream_to_net_words(enciv32, q_req->iv, ivsize);
+		for (i = 0; i < enciv_in_word; i++)
+			QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/*
+	 * write encr key
+	 * do not use  hw key or pipe key
+	 */
+	_byte_stream_to_net_words(enckey32, q_req->enckey, key_size);
+	for (i = 0; i < enck_size_in_word; i++)
+		QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)));
+
+	/* write encr seg cfg */
+	if (q_req->dir == QCE_ENCRYPT)
+		encr_cfg |= (1 << CRYPTO_ENCODE);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* we only support sha1-hmac and sha256-hmac at this point */
+	_byte_stream_to_net_words(mackey32, q_req->authkey,
+					q_req->authklen);
+	for (i = 0; i < authk_size_in_word; i++)
+		QCE_WRITE_REG(mackey32[i], pce_dev->iobase +
+			(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)));
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC) {
+		for (i = 0; i < 5; i++)
+			QCE_WRITE_REG(_std_init_vector_sha1[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	} else {
+		for (i = 0; i < 8; i++)
+			QCE_WRITE_REG(_std_init_vector_sha256[i],
+				pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)));
+	}
+
+	/* write auth_bytecnt 0/1, start with 0 */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG);
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG);
+
+	/* write encr seg size    */
+	QCE_WRITE_REG(q_req->cryptlen, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write encr start   */
+	QCE_WRITE_REG(coffset & 0xffff, pce_dev->iobase +
+			CRYPTO_ENCR_SEG_START_REG);
+
+	if (q_req->auth_alg == QCE_HASH_SHA1_HMAC)
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha1_hmac;
+	else
+		a_cfg = pce_dev->reg.auth_cfg_aead_sha256_hmac;
+
+	if (q_req->dir == QCE_ENCRYPT)
+		a_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+	else
+		a_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* write auth seg_cfg */
+	QCE_WRITE_REG(a_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg_size   */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth_seg_start   */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* write seg_size */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto   */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the write to the GO register has completed before
+	 * issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
+		struct qce_req *creq, uint32_t totallen_in, uint32_t coffset)
+{
+	uint32_t enckey32[(MAX_CIPHER_KEY_SIZE * 2)/sizeof(uint32_t)] = {
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+	uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = {
+			0, 0, 0, 0};
+	uint32_t enck_size_in_word = 0;
+	uint32_t key_size;
+	bool use_hw_key = false;
+	bool use_pipe_key = false;
+	uint32_t encr_cfg = 0;
+	uint32_t ivsize = creq->ivsize;
+	int i;
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the write to the CONFIG register has completed before
+	 * any other configuration registers are written, so that the
+	 * endianness selected in the CONFIG register applies to them.
+	 */
+	mb();
+
+	if (creq->mode == QCE_MODE_XTS)
+		key_size = creq->encklen/2;
+	else
+		key_size = creq->encklen;
+
+	if ((creq->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		use_hw_key = true;
+	} else {
+		if ((creq->flags & QCRYPTO_CTX_USE_PIPE_KEY) ==
+					QCRYPTO_CTX_USE_PIPE_KEY)
+			use_pipe_key = true;
+	}
+	if ((use_pipe_key == false) && (use_hw_key == false)) {
+		_byte_stream_to_net_words(enckey32, creq->enckey, key_size);
+		enck_size_in_word = key_size/sizeof(uint32_t);
+	}
+	if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) {
+		uint32_t authklen32 = creq->encklen/sizeof(uint32_t);
+		uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t);
+		uint32_t nonce32[MAX_NONCE/sizeof(uint32_t)] = {0, 0, 0, 0};
+		uint32_t auth_cfg = 0;
+
+		/* Clear auth_ivn, auth_keyn registers  */
+		for (i = 0; i < 16; i++) {
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+			QCE_WRITE_REG(0, (pce_dev->iobase +
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t))));
+		}
+		/* write auth_bytecnt 0/1/2/3, start with 0 */
+		for (i = 0; i < 4; i++)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_BYTECNT0_REG +
+						i * sizeof(uint32_t));
+		/* write nonce */
+		_byte_stream_to_net_words(nonce32, creq->nonce, MAX_NONCE);
+		for (i = 0; i < noncelen32; i++)
+			QCE_WRITE_REG(nonce32[i], pce_dev->iobase +
+				CRYPTO_AUTH_INFO_NONCE0_REG +
+					(i*sizeof(uint32_t)));
+
+		if (creq->authklen == AES128_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_128;
+		else if (creq->authklen == AES256_KEY_SIZE)
+			auth_cfg = pce_dev->reg.auth_cfg_aes_ccm_256;
+		if (creq->dir == QCE_ENCRYPT)
+			auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+		else
+			auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS);
+		auth_cfg |= ((creq->authsize - 1) << CRYPTO_AUTH_SIZE);
+
+		if (use_hw_key == true)	{
+			auth_cfg |= (1 << CRYPTO_USE_HW_KEY_AUTH);
+		} else {
+			auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+			/* write auth key */
+			for (i = 0; i < authklen32; i++)
+				QCE_WRITE_REG(enckey32[i], pce_dev->iobase +
+				CRYPTO_AUTH_KEY0_REG + (i*sizeof(uint32_t)));
+		}
+		QCE_WRITE_REG(auth_cfg, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+		if (creq->dir == QCE_ENCRYPT) {
+			QCE_WRITE_REG(totallen_in, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_SIZE_REG);
+		} else {
+			QCE_WRITE_REG((totallen_in - creq->authsize),
+				pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+		}
+		QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+	} else {
+		if (creq->op != QCE_REQ_AEAD)
+			QCE_WRITE_REG(0, pce_dev->iobase +
+						CRYPTO_AUTH_SEG_CFG_REG);
+	}
+	/*
+	 * Ensure the writes to all AUTH registers have completed before
+	 * accessing a register that is not in the same 1K address range.
+	 */
+	mb();
+	switch (creq->mode) {
+	case QCE_MODE_ECB:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ecb_256;
+		break;
+	case QCE_MODE_CBC:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_cbc_256;
+		break;
+	case QCE_MODE_XTS:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_xts_256;
+		break;
+	case QCE_MODE_CCM:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ccm_256;
+		break;
+	case QCE_MODE_CTR:
+	default:
+		if (key_size == AES128_KEY_SIZE)
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_128;
+		else
+			encr_cfg = pce_dev->reg.encr_cfg_aes_ctr_256;
+		break;
+	}
+
+	switch (creq->alg) {
+	case CIPHER_ALG_DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			encr_cfg = pce_dev->reg.encr_cfg_des_cbc;
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_des_ecb;
+		}
+		if (use_hw_key == false) {
+			QCE_WRITE_REG(enckey32[0], pce_dev->iobase +
+							CRYPTO_ENCR_KEY0_REG);
+			QCE_WRITE_REG(enckey32[1], pce_dev->iobase +
+							CRYPTO_ENCR_KEY1_REG);
+		}
+		break;
+	case CIPHER_ALG_3DES:
+		if (creq->mode !=  QCE_MODE_ECB) {
+			_byte_stream_to_net_words(enciv32, creq->iv, ivsize);
+			QCE_WRITE_REG(enciv32[0], pce_dev->iobase +
+						CRYPTO_CNTR0_IV0_REG);
+			QCE_WRITE_REG(enciv32[1], pce_dev->iobase +
+						CRYPTO_CNTR1_IV1_REG);
+			encr_cfg = pce_dev->reg.encr_cfg_3des_cbc;
+		} else {
+			encr_cfg = pce_dev->reg.encr_cfg_3des_ecb;
+		}
+		if (use_hw_key == false) {
+			/* write encr key */
+			for (i = 0; i < 6; i++)
+				QCE_WRITE_REG(enckey32[i], (pce_dev->iobase +
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t))));
+		}
+		break;
+	case CIPHER_ALG_AES:
+	default:
+		if (creq->mode ==  QCE_MODE_XTS) {
+			uint32_t xtskey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)]
+					= {0, 0, 0, 0, 0, 0, 0, 0};
+			uint32_t xtsklen =
+					creq->encklen/(2 * sizeof(uint32_t));
+
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				_byte_stream_to_net_words(xtskey32,
+					(creq->enckey + creq->encklen/2),
+							creq->encklen/2);
+				/* write xts encr key */
+				for (i = 0; i < xtsklen; i++)
+					QCE_WRITE_REG(xtskey32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_XTS_KEY0_REG +
+						(i * sizeof(uint32_t)));
+			}
+			/* write xts du size */
+			switch (creq->flags & QCRYPTO_CTX_XTS_MASK) {
+			case QCRYPTO_CTX_XTS_DU_SIZE_512B:
+				QCE_WRITE_REG(
+					min((uint32_t)QCE_SECTOR_SIZE,
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			case QCRYPTO_CTX_XTS_DU_SIZE_1KB:
+				QCE_WRITE_REG(
+					min((uint32_t)(QCE_SECTOR_SIZE * 2),
+					creq->cryptlen), pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			default:
+				QCE_WRITE_REG(creq->cryptlen,
+					pce_dev->iobase +
+					CRYPTO_ENCR_XTS_DU_SIZE_REG);
+				break;
+			}
+		}
+		if (creq->mode !=  QCE_MODE_ECB) {
+			if (creq->mode ==  QCE_MODE_XTS)
+				_byte_stream_swap_to_net_words(enciv32,
+							creq->iv, ivsize);
+			else
+				_byte_stream_to_net_words(enciv32, creq->iv,
+								ivsize);
+
+			/* write encr cntr iv */
+			for (i = 0; i <= 3; i++)
+				QCE_WRITE_REG(enciv32[i], pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(i * sizeof(uint32_t)));
+
+			if (creq->mode == QCE_MODE_CCM) {
+				/* write cntr iv for ccm */
+				for (i = 0; i <= 3; i++)
+					QCE_WRITE_REG(enciv32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_CCM_INT_CNTR0_REG +
+							(i * sizeof(uint32_t)));
+				/* update cntr_iv[3] by one */
+				QCE_WRITE_REG((enciv32[3] + 1),
+							pce_dev->iobase +
+							CRYPTO_CNTR0_IV0_REG +
+							(3 * sizeof(uint32_t)));
+			}
+		}
+
+		if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) {
+			encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 <<
+					CRYPTO_ENCR_KEY_SZ);
+		} else {
+			if ((use_hw_key == false) && (use_pipe_key == false)) {
+				for (i = 0; i < enck_size_in_word; i++)
+					QCE_WRITE_REG(enckey32[i],
+						pce_dev->iobase +
+						CRYPTO_ENCR_KEY0_REG +
+						(i * sizeof(uint32_t)));
+			}
+		} /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */
+		break;
+	} /* end of switch (creq->alg) */
+
+	if (use_pipe_key)
+		encr_cfg |= (CRYPTO_USE_PIPE_KEY_ENCR_ENABLED
+					<< CRYPTO_USE_PIPE_KEY_ENCR);
+
+	/* write encr seg cfg */
+	encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE;
+	if (use_hw_key == true)
+		encr_cfg |= (CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	else
+		encr_cfg &= ~(CRYPTO_USE_HW_KEY << CRYPTO_USE_HW_KEY_ENCR);
+	/* write encr seg cfg */
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg size */
+	if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) {
+		QCE_WRITE_REG((creq->cryptlen + creq->authsize),
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	} else {
+		QCE_WRITE_REG(creq->cryptlen,
+				pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+	}
+
+	/* write encr seg start */
+	QCE_WRITE_REG((coffset & 0xffff),
+			pce_dev->iobase + CRYPTO_ENCR_SEG_START_REG);
+
+	/* write encr counter mask */
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG0);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG1);
+	QCE_WRITE_REG(0xffffffff,
+			pce_dev->iobase + CRYPTO_CNTR_MASK_REG2);
+
+	/* write seg size  */
+	QCE_WRITE_REG(totallen_in, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* issue go to crypto */
+	if (use_hw_key == false) {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase + CRYPTO_GOPROC_REG);
+	} else {
+		QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP)),
+				pce_dev->iobase + CRYPTO_GOPROC_QC_KEY_REG);
+	}
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f9_setup_direct(struct qce_device *pce_dev,
+				 struct qce_f9_req *req)
+{
+	uint32_t ikey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+	uint32_t auth_cfg;
+	int i;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		auth_cfg = pce_dev->reg.auth_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		auth_cfg = pce_dev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+
+	/* set big endian configuration */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/*
+	 * Ensure the previous instruction (setting the CONFIG register)
+	 * has completed before starting to set the other config registers.
+	 * This guarantees the configuration is applied with the endianness
+	 * selected in the CONFIG register.
+	 */
+	mb();
+
+	/* write enc_seg_cfg */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write ecn_seg_size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write key in CRYPTO_AUTH_IV0-3_REG */
+	_byte_stream_to_net_words(ikey32, &req->ikey[0], OTA_KEY_SIZE);
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ikey32[i], (pce_dev->iobase +
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t))));
+
+	/* write last bits  in CRYPTO_AUTH_IV4_REG  */
+	QCE_WRITE_REG(req->last_bits, (pce_dev->iobase +
+					CRYPTO_AUTH_IV4_REG));
+
+	/* write fresh to CRYPTO_AUTH_BYTECNT0_REG */
+	QCE_WRITE_REG(req->fresh, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT0_REG));
+
+	/* write count-i  to CRYPTO_AUTH_BYTECNT1_REG */
+	QCE_WRITE_REG(req->count_i, (pce_dev->iobase +
+					 CRYPTO_AUTH_BYTECNT1_REG));
+
+	/* write auth seg cfg */
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		auth_cfg |= BIT(CRYPTO_F9_DIRECTION);
+	QCE_WRITE_REG(auth_cfg, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+
+	/* write auth seg size */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write auth seg start*/
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_START_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG);
+
+	/* set little endian configuration before go*/
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+static int _ce_f8_setup_direct(struct qce_device *pce_dev,
+		struct qce_f8_req *req, bool key_stream_mode,
+		uint16_t npkts, uint16_t cipher_offset, uint16_t cipher_size)
+{
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t ckey32[OTA_KEY_SIZE/sizeof(uint32_t)];
+	uint32_t key_size_in_word = OTA_KEY_SIZE/sizeof(uint32_t);
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		encr_cfg = pce_dev->reg.encr_cfg_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		encr_cfg = pce_dev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_STATUS_REG);
+	/* set big endian configuration */
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write auth seg configuration */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG);
+	/* write auth seg size */
+	QCE_WRITE_REG(0, pce_dev->iobase + CRYPTO_AUTH_SEG_SIZE_REG);
+
+	/* write key */
+	_byte_stream_to_net_words(ckey32, &req->ckey[0], OTA_KEY_SIZE);
+
+	for (i = 0; i < key_size_in_word; i++)
+		QCE_WRITE_REG(ckey32[i], (pce_dev->iobase +
+			(CRYPTO_ENCR_KEY0_REG + i*sizeof(uint32_t))));
+	/* write encr seg cfg */
+	if (key_stream_mode)
+		encr_cfg |= BIT(CRYPTO_F8_KEYSTREAM_ENABLE);
+	if (req->direction == QCE_OTA_DIR_DOWNLINK)
+		encr_cfg |= BIT(CRYPTO_F8_DIRECTION);
+	QCE_WRITE_REG(encr_cfg, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_CFG_REG);
+
+	/* write encr seg start */
+	QCE_WRITE_REG((cipher_offset & 0xffff), pce_dev->iobase +
+		CRYPTO_ENCR_SEG_START_REG);
+	/* write encr seg size  */
+	QCE_WRITE_REG(cipher_size, pce_dev->iobase +
+		CRYPTO_ENCR_SEG_SIZE_REG);
+
+	/* write seg size  */
+	QCE_WRITE_REG(req->data_len, pce_dev->iobase +
+		CRYPTO_SEG_SIZE_REG);
+
+	/* write cntr0_iv0 for countC */
+	QCE_WRITE_REG(req->count_c, pce_dev->iobase +
+		CRYPTO_CNTR0_IV0_REG);
+	/* write cntr1_iv1 for nPkts, and bearer */
+	if (npkts == 1)
+		npkts = 0;
+	QCE_WRITE_REG(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER |
+				npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT,
+			pce_dev->iobase + CRYPTO_CNTR1_IV1_REG);
+
+	/* set little endian configuration before go*/
+	QCE_WRITE_REG(pce_dev->reg.crypto_cfg_le, (pce_dev->iobase +
+							CRYPTO_CONFIG_REG));
+	/* write go */
+	QCE_WRITE_REG(((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+				(1 << CRYPTO_CLR_CNTXT)),
+				pce_dev->iobase +  CRYPTO_GOPROC_REG);
+	/*
+	 * Ensure the previous instructions (setting the GO register)
+	 * have completed before issuing a DMA transfer request.
+	 */
+	mb();
+	return 0;
+}
+
+
+static int _qce_unlock_other_pipes(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info
+						[req_info].ce_sps;
+
+	if (pce_dev->no_get_around || pce_dev->support_cmd_dscr == false)
+		return rc;
+
+	rc = sps_transfer_one(pce_dev->ce_bam_info.consumer.pipe,
+		GET_PHYS_ADDR(pce_sps_data->
+				cmdlistptr.unlock_all_pipes.cmdlist),
+		0, NULL, (SPS_IOVEC_FLAG_CMD | SPS_IOVEC_FLAG_UNLOCK));
+	if (rc) {
+		pr_err("sps_xfr_one() fail rc=%d\n", rc);
+		rc = -EINVAL;
+	}
+	return rc;
+}
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete);
+
+static int _aead_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct aead_request *areq;
+	unsigned char mac[SHA256_DIGEST_SIZE];
+	uint32_t ccm_fail_status = 0;
+	uint32_t result_dump_status;
+	int32_t result_status = 0;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct aead_request *) preq_info->areq;
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+					DMA_FROM_DEVICE);
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+
+	if (preq_info->asg)
+		qce_dma_unmap_sg(pce_dev->pdev, preq_info->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+	/* check MAC */
+	memcpy(mac, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+
+	/* read status before unlock */
+	if (preq_info->dir == QCE_DECRYPT) {
+		if (pce_dev->no_get_around) {
+			if (pce_dev->no_ccm_mac_status_get_around)
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result->status);
+			else
+				ccm_fail_status = be32_to_cpu(pce_sps_data->
+							result_null->status);
+		} else {
+			ccm_fail_status = readl_relaxed(pce_dev->iobase +
+					CRYPTO_STATUS_REG);
+		}
+	}
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("aead operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status |
+			pce_sps_data->producer_status)  {
+		pr_err("aead sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (preq_info->mode == QCE_MODE_CCM) {
+		/*
+		 * For MAC_FAILED, do not use the result dump; use the
+		 * status just read from the device instead.
+		 */
+		if (result_status == 0 && (preq_info->dir == QCE_DECRYPT) &&
+				(ccm_fail_status & (1 << CRYPTO_MAC_FAILED)))
+			result_status = -EBADMSG;
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, NULL, result_status);
+
+	} else {
+		uint32_t ivsize = 0;
+		struct crypto_aead *aead;
+		unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+
+		aead = crypto_aead_reqtfm(areq);
+		ivsize = crypto_aead_ivsize(aead);
+		memcpy(iv, (char *)(pce_sps_data->result->encr_cntr_iv),
+			sizeof(iv));
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, mac, iv, result_status);
+
+	}
+	return 0;
+}
+
+static int _sha_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct ahash_request *areq;
+	unsigned char digest[SHA256_DIGEST_SIZE];
+	uint32_t bytecount32[2];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct ahash_request *) preq_info->areq;
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				DMA_TO_DEVICE);
+	memcpy(digest, (char *)(&pce_sps_data->result->auth_iv[0]),
+						SHA256_DIGEST_SIZE);
+	_byte_stream_to_net_words(bytecount32,
+		(unsigned char *)pce_sps_data->result->auth_byte_count,
+					2 * CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, digest, (char *)bytecount32,
+				-ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+
+		pr_err("sha operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status) {
+		pr_err("sha sps operation error. sps status %x\n",
+			pce_sps_data->consumer_status);
+		result_status = -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, digest, (char *)bytecount32, result_status);
+	return 0;
+}
+
+static int _f9_complete(struct qce_device *pce_dev, int req_info)
+{
+	uint32_t mac_i;
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, DMA_TO_DEVICE);
+	_byte_stream_to_net_words(&mac_i,
+		(char *)(&pce_sps_data->result->auth_iv[0]),
+		CRYPTO_REG_SIZE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+				| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("f9 operation error. Status %x\n", result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status |
+				pce_sps_data->producer_status)  {
+		pr_err("f9 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, (char *)&mac_i, NULL, result_status);
+
+	return 0;
+}
+
+static int _ablk_cipher_complete(struct qce_device *pce_dev, int req_info)
+{
+	struct ablkcipher_request *areq;
+	unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = (struct ablkcipher_request *) preq_info->areq;
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+			preq_info->dst_nents, DMA_FROM_DEVICE);
+	}
+	qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	pce_sps_data->result->status = 0;
+
+	if (result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR))) {
+		pr_err("ablk_cipher operation error. Status %x\n",
+				result_dump_status);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status |
+				pce_sps_data->producer_status)  {
+		pr_err("ablk_cipher sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+
+	if (preq_info->mode == QCE_MODE_ECB) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, pce_sps_data->consumer_status |
+								result_status);
+	} else {
+		if (pce_dev->ce_bam_info.minor_version == 0) {
+			if (preq_info->mode == QCE_MODE_CBC) {
+				if  (preq_info->dir == QCE_DECRYPT)
+					memcpy(iv, (char *)preq_info->dec_iv,
+								sizeof(iv));
+				else
+					memcpy(iv, (unsigned char *)
+						(sg_virt(areq->src) +
+						areq->src->length - 16),
+						sizeof(iv));
+			}
+			if ((preq_info->mode == QCE_MODE_CTR) ||
+				(preq_info->mode == QCE_MODE_XTS)) {
+				uint32_t num_blk = 0;
+				uint32_t cntr_iv3 = 0;
+				unsigned long long cntr_iv64 = 0;
+				unsigned char *b = (unsigned char *)(&cntr_iv3);
+
+				memcpy(iv, areq->info, sizeof(iv));
+				if (preq_info->mode != QCE_MODE_XTS)
+					num_blk = areq->nbytes/16;
+				else
+					num_blk = 1;
+				cntr_iv3 =  ((*(iv + 12) << 24) & 0xff000000) |
+					(((*(iv + 13)) << 16) & 0xff0000) |
+					(((*(iv + 14)) << 8) & 0xff00) |
+					(*(iv + 15) & 0xff);
+				cntr_iv64 =
+					(((unsigned long long)cntr_iv3 &
+					0xFFFFFFFFULL) +
+					(unsigned long long)num_blk) %
+					(unsigned long long)(0x100000000ULL);
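+				/*
+				 * Worked example (illustrative): if the
+				 * last four IV bytes are FF FF FF FE and
+				 * num_blk is 3, then cntr_iv3 is
+				 * 0xFFFFFFFE and (0xFFFFFFFE + 3) mod
+				 * 2^32 = 1, so the IV tail is rewritten
+				 * below as 00 00 00 01.
+				 */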
+
+				cntr_iv3 = (u32)(cntr_iv64 & 0xFFFFFFFF);
+				*(iv + 15) = (char)(*b);
+				*(iv + 14) = (char)(*(b + 1));
+				*(iv + 13) = (char)(*(b + 2));
+				*(iv + 12) = (char)(*(b + 3));
+			}
+		} else {
+			memcpy(iv,
+				(char *)(pce_sps_data->result->encr_cntr_iv),
+				sizeof(iv));
+		}
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, iv, result_status);
+	}
+	return 0;
+}
+
+static int _f8_complete(struct qce_device *pce_dev, int req_info)
+{
+	int32_t result_status = 0;
+	uint32_t result_dump_status;
+	uint32_t result_dump_status2;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+	qce_comp_func_ptr_t qce_callback;
+	void *areq;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	qce_callback = preq_info->qce_cb;
+	areq = preq_info->areq;
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				preq_info->ota_size, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				preq_info->ota_size, (preq_info->phy_ota_dst) ?
+				DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+
+	if (_qce_unlock_other_pipes(pce_dev, req_info)) {
+		qce_free_req_info(pce_dev, req_info, true);
+		qce_callback(areq, NULL, NULL, -ENXIO);
+		return -ENXIO;
+	}
+	result_dump_status = be32_to_cpu(pce_sps_data->result->status);
+	result_dump_status2 = be32_to_cpu(pce_sps_data->result->status2);
+
+	if ((result_dump_status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
+			| (1 <<  CRYPTO_HSD_ERR)))) {
+		pr_err(
+			"f8 oper error. Dump Sta %x Sta2 %x req %d\n",
+			result_dump_status, result_dump_status2, req_info);
+		result_status = -ENXIO;
+	} else if (pce_sps_data->consumer_status |
+				pce_sps_data->producer_status)  {
+		pr_err("f8 sps operation error. sps status %x %x\n",
+				pce_sps_data->consumer_status,
+				pce_sps_data->producer_status);
+		result_status = -ENXIO;
+	}
+	pce_sps_data->result->status = 0;
+	pce_sps_data->result->status2 = 0;
+	qce_free_req_info(pce_dev, req_info, true);
+	qce_callback(areq, NULL, NULL, result_status);
+	return 0;
+}
+
+static void _qce_sps_iovec_count_init(struct qce_device *pce_dev, int req_info)
+{
+	struct ce_sps_data *pce_sps_data = &pce_dev->ce_request_info[req_info]
+							.ce_sps;
+	pce_sps_data->in_transfer.iovec_count = 0;
+	pce_sps_data->out_transfer.iovec_count = 0;
+}
+
+static void _qce_set_flag(struct sps_transfer *sps_bam_pipe, uint32_t flag)
+{
+	struct sps_iovec *iovec;
+
+	if (sps_bam_pipe->iovec_count == 0)
+		return;
+	iovec  = sps_bam_pipe->iovec + (sps_bam_pipe->iovec_count - 1);
+	iovec->flags |= flag;
+}
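+
+/*
+ * Typical use (see _sps_producer_callback() below): after queueing the
+ * result-dump descriptor, _qce_set_flag(..., SPS_IOVEC_FLAG_INT) marks
+ * only the last iovec, so the BAM raises a single interrupt when the
+ * whole transfer completes.
+ */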
+
+static int _qce_sps_add_data(dma_addr_t paddr, uint32_t len,
+		struct sps_transfer *sps_bam_pipe)
+{
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	uint32_t data_cnt;
+
+	while (len > 0) {
+		if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+			pr_err("Num of descriptors %d exceeds max (%d)\n",
+				sps_bam_pipe->iovec_count,
+				(uint32_t)QCE_MAX_NUM_DSCR);
+			return -ENOMEM;
+		}
+		if (len > SPS_MAX_PKT_SIZE)
+			data_cnt = SPS_MAX_PKT_SIZE;
+		else
+			data_cnt = len;
+		iovec->size = data_cnt;
+		iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+		iovec->flags = SPS_GET_UPPER_ADDR(paddr);
+		sps_bam_pipe->iovec_count++;
+		iovec++;
+		paddr += data_cnt;
+		len -= data_cnt;
+	}
+	return 0;
+}
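+
+/*
+ * Illustrative sketch (assuming SPS_MAX_PKT_SIZE is 32 KB): a 70000-byte
+ * contiguous buffer passed to _qce_sps_add_data() above is split into
+ * three iovecs of 32768, 32768 and 4464 bytes at consecutive physical
+ * addresses.
+ */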
+
+static int _qce_sps_add_sg_data(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg_src));
+		nbytes -= len;
+		addr = sg_dma_address(sg_src);
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)\n",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			if (len > SPS_MAX_PKT_SIZE) {
+				data_cnt = SPS_MAX_PKT_SIZE;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			} else {
+				data_cnt = len;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			}
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		sg_src = sg_next(sg_src);
+	}
+	return 0;
+}
+
+static int _qce_sps_add_sg_data_off(struct qce_device *pce_dev,
+		struct scatterlist *sg_src, uint32_t nbytes, uint32_t off,
+		struct sps_transfer *sps_bam_pipe)
+{
+	uint32_t data_cnt, len;
+	dma_addr_t addr;
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+						sps_bam_pipe->iovec_count;
+	unsigned int res_within_sg;
+
+	if (!sg_src)
+		return -ENOENT;
+	res_within_sg = sg_dma_len(sg_src);
+
+	while (off > 0) {
+		if (!sg_src) {
+			pr_err("broken sg list off %d nbytes %d\n",
+				off, nbytes);
+			return -ENOENT;
+		}
+		len = sg_dma_len(sg_src);
+		if (off < len) {
+			res_within_sg = len - off;
+			break;
+		}
+		off -= len;
+		sg_src = sg_next(sg_src);
+		if (sg_src)
+			res_within_sg = sg_dma_len(sg_src);
+	}
+	while (nbytes > 0 && sg_src) {
+		len = min(nbytes, res_within_sg);
+		nbytes -= len;
+		addr = sg_dma_address(sg_src) + off;
+		if (pce_dev->ce_bam_info.minor_version == 0)
+			len = ALIGN(len, pce_dev->ce_bam_info.ce_burst_size);
+		while (len > 0) {
+			if (sps_bam_pipe->iovec_count == QCE_MAX_NUM_DSCR) {
+				pr_err("Num of descriptors %d exceeds max (%d)\n",
+						sps_bam_pipe->iovec_count,
+						(uint32_t)QCE_MAX_NUM_DSCR);
+				return -ENOMEM;
+			}
+			if (len > SPS_MAX_PKT_SIZE) {
+				data_cnt = SPS_MAX_PKT_SIZE;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			} else {
+				data_cnt = len;
+				iovec->size = data_cnt;
+				iovec->addr = SPS_GET_LOWER_ADDR(addr);
+				iovec->flags = SPS_GET_UPPER_ADDR(addr);
+			}
+			iovec++;
+			sps_bam_pipe->iovec_count++;
+			addr += data_cnt;
+			len -= data_cnt;
+		}
+		if (nbytes) {
+			sg_src = sg_next(sg_src);
+			if (!sg_src) {
+				pr_err("sg list ended with %d bytes still to queue\n",
+					nbytes);
+				return -ENOMEM;
+			}
+			res_within_sg = sg_dma_len(sg_src);
+			off = 0;
+		}
+	}
+	return 0;
+}
+
+static int _qce_sps_add_cmd(struct qce_device *pce_dev, uint32_t flag,
+				struct qce_cmdlist_info *cmdptr,
+				struct sps_transfer *sps_bam_pipe)
+{
+	dma_addr_t  paddr = GET_PHYS_ADDR(cmdptr->cmdlist);
+	struct sps_iovec *iovec = sps_bam_pipe->iovec +
+					sps_bam_pipe->iovec_count;
+	iovec->size = cmdptr->size;
+	iovec->addr = SPS_GET_LOWER_ADDR(paddr);
+	iovec->flags = SPS_GET_UPPER_ADDR(paddr) | SPS_IOVEC_FLAG_CMD | flag;
+	sps_bam_pipe->iovec_count++;
+	if (sps_bam_pipe->iovec_count >= QCE_MAX_NUM_DSCR) {
+		pr_err("Num of descriptors %d exceeds max (%d)\n",
+			sps_bam_pipe->iovec_count, (uint32_t)QCE_MAX_NUM_DSCR);
+		return -ENOMEM;
+	}
+	return 0;
+}
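+
+/*
+ * Unlike the data helpers above, the descriptor queued by
+ * _qce_sps_add_cmd() carries SPS_IOVEC_FLAG_CMD, so the BAM interprets
+ * the buffer as a command list (a series of register writes) rather
+ * than payload to move.
+ */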
+
+static int _qce_sps_transfer(struct qce_device *pce_dev, int req_info)
+{
+	int rc = 0;
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &pce_dev->ce_request_info[req_info].ce_sps;
+	pce_sps_data->out_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
+	pce_sps_data->in_transfer.user =
+		(void *)((uintptr_t)(CRYPTO_REQ_USER_PAT |
+					(unsigned int) req_info));
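+	/*
+	 * The transfer cookie packs CRYPTO_REQ_USER_PAT into the upper
+	 * 16 bits and req_info into the low bits; _sps_producer_callback()
+	 * checks the pattern and recovers req_info from the cookie.
+	 */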
+	_qce_dump_descr_fifos_dbg(pce_dev, req_info);
+
+	if (pce_sps_data->in_transfer.iovec_count) {
+		rc = sps_transfer(pce_dev->ce_bam_info.consumer.pipe,
+					  &pce_sps_data->in_transfer);
+		if (rc) {
+			pr_err("sps_xfr() fail (consumer pipe=0x%lx) rc = %d\n",
+				(uintptr_t)pce_dev->ce_bam_info.consumer.pipe,
+				rc);
+			goto ret;
+		}
+	}
+	rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+					  &pce_sps_data->out_transfer);
+	if (rc)
+		pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+			(uintptr_t)pce_dev->ce_bam_info.producer.pipe, rc);
+ret:
+	if (rc)
+		_qce_dump_descr_fifos(pce_dev, req_info);
+	return rc;
+}
+
+/**
+ * Allocate and connect a CE peripheral's SPS endpoint
+ *
+ * This function allocates an endpoint context and
+ * connects it with a memory endpoint by calling
+ * the appropriate SPS driver APIs.
+ *
+ * It also registers an SPS callback function with
+ * the SPS driver.
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ * @is_producer - true means producer endpoint
+ *		  false means consumer endpoint
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep,
+				bool is_producer)
+{
+	int rc = 0;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info = &ep->connect;
+	struct sps_register_event *sps_event = &ep->event;
+
+	/* Allocate endpoint context */
+	sps_pipe_info = sps_alloc_endpoint();
+	if (!sps_pipe_info) {
+		pr_err("sps_alloc_endpoint() failed, is_producer=%d\n",
+			   is_producer);
+		rc = -ENOMEM;
+		goto out;
+	}
+	/* Now save the sps pipe handle */
+	ep->pipe = sps_pipe_info;
+
+	/* Get default connection configuration for an endpoint */
+	rc = sps_get_config(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_get_config() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto get_config_err;
+	}
+
+	/* Modify the default connection configuration */
+	if (is_producer) {
+		/*
+		 * For a CE producer transfer, the source is the
+		 * CE peripheral, whereas the destination is
+		 * system memory.
+		 */
+		sps_connect_info->source = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->destination = SPS_DEV_HANDLE_MEM;
+		/* Producer pipe will handle this connection */
+		sps_connect_info->mode = SPS_MODE_SRC;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE | SPS_O_DESC_DONE;
+	} else {
+		/* For a CE consumer transfer, the source is
+		 * system memory, whereas the destination is the
+		 * CE peripheral.
+		 */
+		sps_connect_info->source = SPS_DEV_HANDLE_MEM;
+		sps_connect_info->destination = pce_dev->ce_bam_info.bam_handle;
+		sps_connect_info->mode = SPS_MODE_DEST;
+		sps_connect_info->options =
+			SPS_O_AUTO_ENABLE;
+	}
+
+	/* Producer pipe index */
+	sps_connect_info->src_pipe_index =
+				pce_dev->ce_bam_info.src_pipe_index;
+	/* Consumer pipe index */
+	sps_connect_info->dest_pipe_index =
+				pce_dev->ce_bam_info.dest_pipe_index;
+	/* Set pipe group */
+	sps_connect_info->lock_group = pce_dev->ce_bam_info.pipe_pair_index;
+	sps_connect_info->event_thresh = 0x10;
+	/*
+	 * Each BAM descriptor (struct sps_iovec) takes 64 bits (8 bytes),
+	 * and one descriptor is required per buffer transfer. The FIFO
+	 * below is therefore sized to hold QCE_MAX_NUM_DSCR descriptors
+	 * for each of the MAX_QCE_ALLOC_BAM_REQ outstanding requests,
+	 * and is capped at MAX_SPS_DESC_FIFO_SIZE.
+	 */
+	sps_connect_info->desc.size = QCE_MAX_NUM_DSCR * MAX_QCE_ALLOC_BAM_REQ *
+					sizeof(struct sps_iovec);
+	if (sps_connect_info->desc.size > MAX_SPS_DESC_FIFO_SIZE)
+		sps_connect_info->desc.size = MAX_SPS_DESC_FIFO_SIZE;
+	sps_connect_info->desc.base = dma_alloc_coherent(pce_dev->pdev,
+					sps_connect_info->desc.size,
+					&sps_connect_info->desc.phys_base,
+					GFP_KERNEL);
+	if (sps_connect_info->desc.base == NULL) {
+		rc = -ENOMEM;
+		pr_err("Can not allocate coherent memory for sps data\n");
+		goto get_config_err;
+	}
+
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+
+	/* Establish connection between peripheral and memory endpoint */
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+				(uintptr_t)sps_pipe_info, rc);
+		goto sps_connect_err;
+	}
+
+	sps_event->mode = SPS_TRIGGER_CALLBACK;
+	sps_event->xfer_done = NULL;
+	sps_event->user = (void *)pce_dev;
+	if (is_producer) {
+		sps_event->options = SPS_O_EOT | SPS_O_DESC_DONE;
+		sps_event->callback = _sps_producer_callback;
+		rc = sps_register_event(ep->pipe, sps_event);
+		if (rc) {
+			pr_err("Producer callback registration failed rc=%d\n",
+									rc);
+			goto sps_connect_err;
+		}
+	} else {
+		sps_event->options = SPS_O_EOT;
+		sps_event->callback = NULL;
+	}
+
+	pr_debug("success, %s : pipe_handle=0x%lx, desc fifo base (phy) = %pa\n",
+		is_producer ? "PRODUCER(RX/OUT)" : "CONSUMER(TX/IN)",
+		(uintptr_t)sps_pipe_info, &sps_connect_info->desc.phys_base);
+	goto out;
+
+sps_connect_err:
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+get_config_err:
+	sps_free_endpoint(sps_pipe_info);
+out:
+	return rc;
+}
+
+/**
+ * Disconnect and deallocate a CE peripheral's SPS endpoint
+ *
+ * This function disconnects the endpoint and deallocates
+ * the endpoint context.
+ *
+ * This function should only be called once, typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ * @ep   - Pointer to sps endpoint data structure
+ *
+ */
+static void qce_sps_exit_ep_conn(struct qce_device *pce_dev,
+				struct qce_sps_ep_conn_data *ep)
+{
+	struct sps_pipe *sps_pipe_info = ep->pipe;
+	struct sps_connect *sps_connect_info = &ep->connect;
+
+	sps_disconnect(sps_pipe_info);
+	dma_free_coherent(pce_dev->pdev,
+			sps_connect_info->desc.size,
+			sps_connect_info->desc.base,
+			sps_connect_info->desc.phys_base);
+	sps_free_endpoint(sps_pipe_info);
+}
+
+static void qce_sps_release_bam(struct qce_device *pce_dev)
+{
+	struct bam_registration_info *pbam;
+
+	mutex_lock(&bam_register_lock);
+	pbam = pce_dev->pbam;
+	if (pbam == NULL)
+		goto ret;
+
+	pbam->cnt--;
+	if (pbam->cnt > 0)
+		goto ret;
+
+	if (pce_dev->ce_bam_info.bam_handle) {
+		sps_deregister_bam_device(pce_dev->ce_bam_info.bam_handle);
+
+		pr_debug("deregister bam handle 0x%lx\n",
+					pce_dev->ce_bam_info.bam_handle);
+		pce_dev->ce_bam_info.bam_handle = 0;
+	}
+	iounmap(pbam->bam_iobase);
+	pr_debug("delete bam 0x%x\n", pbam->bam_mem);
+	list_del(&pbam->qlist);
+	kfree(pbam);
+
+ret:
+	pce_dev->pbam = NULL;
+	mutex_unlock(&bam_register_lock);
+}
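+
+/*
+ * Note on BAM sharing: several CE devices may sit behind one BAM.
+ * qce_sps_get_bam() below looks the BAM up in qce50_bam_list by its
+ * physical address and bumps pbam->cnt; qce_sps_release_bam() above
+ * drops the count and only deregisters and unmaps the BAM once the
+ * last user is gone.
+ */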
+
+static int qce_sps_get_bam(struct qce_device *pce_dev)
+{
+	int rc = 0;
+	struct sps_bam_props bam = {0};
+	struct bam_registration_info *pbam = NULL;
+	struct bam_registration_info *p;
+	uint32_t bam_cfg = 0;
+
+
+	mutex_lock(&bam_register_lock);
+
+	list_for_each_entry(p, &qce50_bam_list, qlist) {
+		if (p->bam_mem == pce_dev->bam_mem) {
+			pbam = p;  /* found */
+			break;
+		}
+	}
+
+	if (pbam) {
+		pr_debug("found bam 0x%x\n", pbam->bam_mem);
+		pbam->cnt++;
+		pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+		pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+		pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+		pce_dev->pbam = pbam;
+		pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+		goto ret;
+	}
+
+	pbam = kzalloc(sizeof(struct  bam_registration_info), GFP_KERNEL);
+	if (!pbam) {
+		rc = -ENOMEM;
+		goto ret;
+	}
+	pbam->cnt = 1;
+	pbam->bam_mem = pce_dev->bam_mem;
+	pbam->bam_iobase = ioremap_nocache(pce_dev->bam_mem,
+					pce_dev->bam_mem_size);
+	if (!pbam->bam_iobase) {
+		kfree(pbam);
+		rc = -ENOMEM;
+		pr_err("Can not map BAM io memory\n");
+		goto ret;
+	}
+	pce_dev->ce_bam_info.bam_mem = pbam->bam_mem;
+	pce_dev->ce_bam_info.bam_iobase = pbam->bam_iobase;
+	pbam->handle = 0;
+	pr_debug("allocate bam 0x%x\n", pbam->bam_mem);
+	bam_cfg = readl_relaxed(pce_dev->ce_bam_info.bam_iobase +
+					CRYPTO_BAM_CNFG_BITS_REG);
+	pbam->support_cmd_dscr =  (bam_cfg & CRYPTO_BAM_CD_ENABLE_MASK) ?
+					true : false;
+	if (pbam->support_cmd_dscr == false) {
+		pr_info("qce50 does not support command descriptors, bam_cfg 0x%x\n",
+							bam_cfg);
+		pce_dev->no_get_around = false;
+	}
+	pce_dev->support_cmd_dscr = pbam->support_cmd_dscr;
+
+	bam.phys_addr = pce_dev->ce_bam_info.bam_mem;
+	bam.virt_addr = pce_dev->ce_bam_info.bam_iobase;
+
+	/*
+	 * This event threshold value is only significant for BAM-to-BAM
+	 * transfers. It is ignored for BAM-to-system mode transfers.
+	 */
+	bam.event_threshold = 0x10;	/* Pipe event threshold */
+	/*
+	 * This threshold controls when the BAM publishes
+	 * the descriptor size on the sideband interface.
+	 * SPS HW will only be used when the
+	 * data transfer size is > 64 bytes.
+	 */
+	bam.summing_threshold = 64;
+	/* SPS driver will handle the crypto BAM IRQ */
+	bam.irq = (u32)pce_dev->ce_bam_info.bam_irq;
+	/*
+	 * Set flag to indicate BAM global device control is managed
+	 * remotely.
+	 */
+	if ((pce_dev->support_cmd_dscr == false) || (pce_dev->is_shared))
+		bam.manage = SPS_BAM_MGR_DEVICE_REMOTE;
+	else
+		bam.manage = SPS_BAM_MGR_LOCAL;
+
+	bam.ee = pce_dev->ce_bam_info.bam_ee;
+	bam.ipc_loglevel = QCE_BAM_DEFAULT_IPC_LOGLVL;
+	bam.options |= SPS_BAM_CACHED_WP;
+	pr_debug("bam physical base=0x%lx\n", (uintptr_t)bam.phys_addr);
+	pr_debug("bam virtual base=0x%p\n", bam.virt_addr);
+
+	/* Register CE Peripheral BAM device to SPS driver */
+	rc = sps_register_bam_device(&bam, &pbam->handle);
+	if (rc) {
+		pr_err("sps_register_bam_device() failed, err=%d\n", rc);
+		rc = -EIO;
+		iounmap(pbam->bam_iobase);
+		kfree(pbam);
+		goto ret;
+	}
+
+	pce_dev->pbam = pbam;
+	list_add_tail(&pbam->qlist, &qce50_bam_list);
+	pce_dev->ce_bam_info.bam_handle =  pbam->handle;
+
+ret:
+	mutex_unlock(&bam_register_lock);
+
+	return rc;
+}
+
+/**
+ * Initialize the SPS HW connected with the CE core
+ *
+ * This function registers BAM HW resources with the
+ * SPS driver and then initializes the two SPS endpoints.
+ *
+ * This function should only be called once, typically
+ * during driver probe.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ * @return - 0 if successful else negative value.
+ *
+ */
+static int qce_sps_init(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	rc = qce_sps_get_bam(pce_dev);
+	if (rc)
+		return rc;
+	pr_debug("BAM device registered. bam_handle=0x%lx\n",
+		pce_dev->ce_bam_info.bam_handle);
+
+	rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.producer, true);
+	if (rc)
+		goto sps_connect_producer_err;
+	rc = qce_sps_init_ep_conn(pce_dev,
+			&pce_dev->ce_bam_info.consumer, false);
+	if (rc)
+		goto sps_connect_consumer_err;
+
+	pr_info(" QTI MSM CE-BAM at 0x%016llx irq %d\n",
+		(unsigned long long)pce_dev->ce_bam_info.bam_mem,
+		(unsigned int)pce_dev->ce_bam_info.bam_irq);
+	return rc;
+
+sps_connect_consumer_err:
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+sps_connect_producer_err:
+	qce_sps_release_bam(pce_dev);
+	return rc;
+}
+
+static inline int qce_alloc_req_info(struct qce_device *pce_dev)
+{
+	int i;
+	int request_index = pce_dev->ce_request_index;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		request_index++;
+		if (request_index >= MAX_QCE_BAM_REQ)
+			request_index = 0;
+		if (xchg(&pce_dev->ce_request_info[request_index].
+						in_use, true) == false) {
+			pce_dev->ce_request_index = request_index;
+			return request_index;
+		}
+	}
+	pr_warn("pcedev %d no reqs available no_of_queued_req %d\n",
+			pce_dev->dev_no, atomic_read(
+					&pce_dev->no_of_queued_req));
+	return -EBUSY;
+}
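+
+/*
+ * The xchg() in qce_alloc_req_info() above is a lock-free slot claim:
+ * it atomically sets in_use and returns the previous value, so exactly
+ * one caller can observe the false -> true transition and own the slot.
+ * qce_free_req_info() below releases a slot the same way.
+ */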
+
+static inline void qce_free_req_info(struct qce_device *pce_dev, int req_info,
+		bool is_complete)
+{
+	pce_dev->ce_request_info[req_info].xfer_type = QCE_XFER_TYPE_LAST;
+	if (xchg(&pce_dev->ce_request_info[req_info].in_use, false) == true) {
+		if (req_info < MAX_QCE_BAM_REQ && is_complete)
+			atomic_dec(&pce_dev->no_of_queued_req);
+	} else {
+		pr_warn("request info %d already free\n", req_info);
+	}
+}
+
+static void print_notify_debug(struct sps_event_notify *notify)
+{
+	phys_addr_t addr =
+		DESC_FULL_ADDR((phys_addr_t) notify->data.transfer.iovec.flags,
+				  notify->data.transfer.iovec.addr);
+	pr_debug("sps ev_id=%d, addr=%pa, size=0x%x, flags=0x%x user=0x%p\n",
+			notify->event_id, &addr,
+			notify->data.transfer.iovec.size,
+			notify->data.transfer.iovec.flags,
+			notify->data.transfer.user);
+}
+
+static void _qce_req_complete(struct qce_device *pce_dev, unsigned int req_info)
+{
+	struct ce_request_info *preq_info;
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+
+	switch (preq_info->xfer_type) {
+	case QCE_XFER_CIPHERING:
+		_ablk_cipher_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_HASHING:
+		_sha_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_AEAD:
+		_aead_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F8:
+		_f8_complete(pce_dev, req_info);
+		break;
+	case QCE_XFER_F9:
+		_f9_complete(pce_dev, req_info);
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, true);
+		break;
+	}
+}
+
+static void qce_multireq_timeout(unsigned long data)
+{
+	struct qce_device *pce_dev = (struct qce_device *)data;
+	int ret = 0;
+	int last_seq;
+	unsigned long flags;
+
+	last_seq = atomic_read(&pce_dev->bunch_cmd_seq);
+	if (last_seq == 0 ||
+		last_seq != atomic_read(&pce_dev->last_intr_seq)) {
+		atomic_set(&pce_dev->last_intr_seq, last_seq);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+	/* last bunch mode command time out */
+
+	/*
+	 * Interrupts are disabled from here until the dummy request has
+	 * finished its sps transfer and the owner is set back to none,
+	 * so this section cannot be preempted or interrupted. If a BAM
+	 * interrupt fired in between and the completion callback were
+	 * invoked, the client driver could issue a new request and a
+	 * deadlock could occur.
+	 */
+	local_irq_save(flags);
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_TIMEOUT)
+							!= QCE_OWNER_NONE) {
+		local_irq_restore(flags);
+		mod_timer(&(pce_dev->timer), (jiffies + DELAY_IN_JIFFIES));
+		return;
+	}
+
+	ret = qce_dummy_req(pce_dev);
+	if (ret)
+		pr_warn("pcedev %d: Failed to insert dummy req\n",
+				pce_dev->dev_no);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_TIMEOUT, QCE_OWNER_NONE);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	local_irq_restore(flags);
+
+	del_timer(&(pce_dev->timer));
+	pce_dev->qce_stats.no_of_timeouts++;
+	pr_debug("pcedev %d mode switch to INTR\n", pce_dev->dev_no);
+}
+
+void qce_get_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (!_qce50_disp_stats)
+		return;
+	pr_info("Engine %d timeouts occurred %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_timeouts);
+	pr_info("Engine %d dummy requests inserted %d\n", pce_dev->dev_no,
+			pce_dev->qce_stats.no_of_dummy_reqs);
+	if (pce_dev->mode)
+		pr_info("Engine %d is in BUNCH MODE\n", pce_dev->dev_no);
+	else
+		pr_info("Engine %d is in INTERRUPT MODE\n", pce_dev->dev_no);
+	pr_info("Engine %d outstanding requests %d\n", pce_dev->dev_no,
+			atomic_read(&pce_dev->no_of_queued_req));
+}
+EXPORT_SYMBOL(qce_get_driver_stats);
+
+void qce_clear_driver_stats(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	pce_dev->qce_stats.no_of_timeouts = 0;
+	pce_dev->qce_stats.no_of_dummy_reqs = 0;
+}
+EXPORT_SYMBOL(qce_clear_driver_stats);
+
+static void _sps_producer_callback(struct sps_event_notify *notify)
+{
+	struct qce_device *pce_dev = (struct qce_device *)notify->user;
+	int rc = 0;
+	unsigned int req_info;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	print_notify_debug(notify);
+
+	req_info = (unsigned int)((uintptr_t)notify->data.transfer.user);
+	if ((req_info & 0xffff0000)  != CRYPTO_REQ_USER_PAT) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	req_info = req_info & 0x00ff;
+	if (req_info >= MAX_QCE_ALLOC_BAM_REQ) {
+		pr_warn("request information %d out of range\n", req_info);
+		return;
+	}
+
+	preq_info = &pce_dev->ce_request_info[req_info];
+
+	pce_sps_data = &preq_info->ce_sps;
+	if ((preq_info->xfer_type == QCE_XFER_CIPHERING ||
+		preq_info->xfer_type == QCE_XFER_AEAD) &&
+			pce_sps_data->producer_state == QCE_PIPE_STATE_IDLE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		pce_sps_data->out_transfer.iovec_count = 0;
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+		_qce_set_flag(&pce_sps_data->out_transfer,
+				SPS_IOVEC_FLAG_INT);
+		rc = sps_transfer(pce_dev->ce_bam_info.producer.pipe,
+					  &pce_sps_data->out_transfer);
+		if (rc) {
+			pr_err("sps_xfr() fail (producer pipe=0x%lx) rc = %d\n",
+				(uintptr_t)pce_dev->ce_bam_info.producer.pipe,
+				rc);
+		}
+		return;
+	}
+
+	_qce_req_complete(pce_dev, req_info);
+}
+
+/**
+ * De-initialize the SPS HW connected with the CE core
+ *
+ * This function deinitializes the SPS endpoints and then
+ * deregisters the BAM resources from the SPS driver.
+ *
+ * This function should only be called once, typically
+ * during driver remove.
+ *
+ * @pce_dev - Pointer to qce_device structure
+ *
+ */
+static void qce_sps_exit(struct qce_device *pce_dev)
+{
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.consumer);
+	qce_sps_exit_ep_conn(pce_dev, &pce_dev->ce_bam_info.producer);
+	qce_sps_release_bam(pce_dev);
+}
+
+static void qce_add_cmd_element(struct qce_device *pdev,
+			struct sps_command_element **cmd_ptr, u32 addr,
+			u32 data, struct sps_command_element **populate)
+{
+	(*cmd_ptr)->addr = (uint32_t)(addr + pdev->phy_iobase);
+	(*cmd_ptr)->command = 0;
+	(*cmd_ptr)->data = data;
+	(*cmd_ptr)->mask = 0xFFFFFFFF;
+	(*cmd_ptr)->reserved = 0;
+	if (populate != NULL)
+		*populate = *cmd_ptr;
+	(*cmd_ptr)++;
+}
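+
+/*
+ * Illustrative use: each call below emits one command element per CE
+ * register write, e.g.
+ *
+ *	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+ *
+ * writes 0 to pdev->phy_iobase + CRYPTO_STATUS_REG when the BAM executes
+ * the command list. A non-NULL "populate" pointer hands the element back
+ * to the caller so its data can be patched later without rebuilding the
+ * list.
+ */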
+
+static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_mode_enum mode,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t xts_key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to AES cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (mode) {
+	case QCE_MODE_CBC:
+	case QCE_MODE_CTR:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_cbc_ctr);
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_cbc_ctr.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_cbc_ctr);
+
+			if (mode == QCE_MODE_CBC)
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+			else
+				encr_cfg = pdev->reg.encr_cfg_aes_ctr_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_ECB:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_128;
+			iv_reg = 0;
+			key_reg = 4;
+			xts_key_reg = 0;
+		} else {
+			cmdlistptr->cipher_aes_256_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_ecb_256;
+			iv_reg = 0;
+			key_reg = 8;
+			xts_key_reg = 0;
+		}
+	break;
+	case QCE_MODE_XTS:
+		if (key_128 == true) {
+			cmdlistptr->cipher_aes_128_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_128_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_128;
+			iv_reg = 4;
+			key_reg = 4;
+			xts_key_reg = 4;
+		} else {
+			cmdlistptr->cipher_aes_256_xts.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_aes_256_xts);
+
+			encr_cfg = pdev->reg.encr_cfg_aes_xts_256;
+			iv_reg = 4;
+			key_reg = 8;
+			xts_key_reg = 8;
+		}
+	break;
+	default:
+		pr_err("Unknown mode of operation %d received, exiting now\n",
+			mode);
+		return -EINVAL;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (xts_key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_XTS_KEY0_REG,
+					0, &pcl_info->encr_xts_key);
+		for (i = 1; i < xts_key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_XTS_KEY0_REG +
+						i * sizeof(uint32_t)), 0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_ENCR_XTS_DU_SIZE_REG, 0,
+					&pcl_info->encr_xts_du_size);
+	}
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+	/* Add dummy command elements to align size to a burst-size multiple */
+	if (mode == QCE_MODE_XTS) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_size);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_cipher_alg_enum alg,
+		bool mode_cbc)
+{
+
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to cipher operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case CIPHER_ALG_DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_cbc);
+
+
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			iv_reg = 2;
+			key_reg = 2;
+		} else {
+			cmdlistptr->cipher_des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_des_ecb;
+			iv_reg = 0;
+			key_reg = 2;
+		}
+	break;
+	case CIPHER_ALG_3DES:
+		if (mode_cbc) {
+			cmdlistptr->cipher_3des_cbc.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_cbc);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			iv_reg = 2;
+			key_reg = 6;
+		} else {
+			cmdlistptr->cipher_3des_ecb.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->cipher_3des_ecb);
+
+			encr_cfg = pdev->reg.encr_cfg_3des_ecb;
+			iv_reg = 0;
+			key_reg = 6;
+		}
+	break;
+	default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	if (iv_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_cipher_null_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	cmdlistptr->cipher_null.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->cipher_null);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG,
+			pdev->ce_bam_info.ce_burst_size, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+			pdev->reg.encr_cfg_aes_ecb_128, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+			 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
+static int _setup_auth_cmdlistptrs(struct qce_device *pdev, int cri_index,
+		unsigned char **pvaddr, enum qce_hash_alg_enum alg,
+		bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t key_reg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_HASH_SHA1:
+		cmdlistptr->auth_sha1.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1);
+
+		auth_cfg = pdev->reg.auth_cfg_sha1;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	break;
+	case QCE_HASH_SHA256:
+		cmdlistptr->auth_sha256.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256);
+
+		auth_cfg = pdev->reg.auth_cfg_sha256;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_SHA1_HMAC:
+		cmdlistptr->auth_sha1_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha1_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
+		key_reg = 16;
+		iv_reg = 5;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+					0, NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+	break;
+	case QCE_HASH_SHA256_HMAC:
+		cmdlistptr->auth_sha256_hmac.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->auth_sha256_hmac);
+
+		auth_cfg = pdev->reg.auth_cfg_hmac_sha256;
+		key_reg = 16;
+		iv_reg = 8;
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	case QCE_HASH_AES_CMAC:
+		if (key_128 == true) {
+			cmdlistptr->auth_aes_128_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_128_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_128;
+			key_reg = 4;
+		} else {
+			cmdlistptr->auth_aes_256_cmac.cmdlist =
+						(uintptr_t)ce_vaddr;
+			pcl_info = &(cmdlistptr->auth_aes_256_cmac);
+
+			auth_cfg = pdev->reg.auth_cfg_cmac_256;
+			key_reg = 8;
+		}
+
+		/* clear status register */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
+					NULL);
+
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+		/* 1 dummy write */
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
+								0, NULL);
+	break;
+	default:
+		pr_err("Unknown algorithm %d received, exiting now\n", alg);
+		return -EINVAL;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	if (alg == QCE_HASH_AES_CMAC) {
+		/* reset auth iv, bytecount and key  registers */
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+		for (i = 0; i < 16; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, NULL);
+	} else {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+		for (i = 1; i < iv_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+						0, &pcl_info->auth_bytecount);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	if (key_reg) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				CRYPTO_AUTH_KEY0_REG, 0, &pcl_info->auth_key);
+		for (i = 1; i < key_reg; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
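+/*
+ * Build the reusable SPS command list for an AEAD (CBC cipher plus
+ * HMAC-SHA1/SHA256) operation. The command elements recorded in
+ * pcl_info are the ones later patched with per-request values.
+ */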
+static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
+				int cri_index,
+				unsigned char **pvaddr,
+				uint32_t alg,
+				uint32_t mode,
+				uint32_t key_size,
+				bool     sha1)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	uint32_t key_reg;
+	uint32_t iv_reg;
+	uint32_t i;
+	uint32_t enciv_in_word;
+	uint32_t encr_cfg;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	switch (alg) {
+
+	case CIPHER_ALG_DES:
+
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmdlistptr->aead_hmac_sha1_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha1_cbc_des);
+			} else {
+				cmdlistptr->aead_hmac_sha256_cbc_des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha256_cbc_des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_3DES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (sha1) {
+				cmdlistptr->aead_hmac_sha1_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha1_cbc_3des);
+			} else {
+				cmdlistptr->aead_hmac_sha256_cbc_3des.cmdlist =
+					(uintptr_t)ce_vaddr;
+				pcl_info = &(cmdlistptr->
+					aead_hmac_sha256_cbc_3des);
+			}
+			encr_cfg = pdev->reg.encr_cfg_3des_cbc;
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 2;
+
+		break;
+
+	case CIPHER_ALG_AES:
+		switch (mode) {
+
+		case QCE_MODE_CBC:
+			if (key_size == AES128_KEY_SIZE) {
+				if (sha1) {
+					cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha1_cbc_aes_128);
+				} else {
+					cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha256_cbc_aes_128);
+				}
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_128;
+			} else if (key_size == AES256_KEY_SIZE) {
+				if (sha1) {
+					cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha1_cbc_aes_256);
+				} else {
+					cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256.
+						cmdlist = (uintptr_t)ce_vaddr;
+					pcl_info = &(cmdlistptr->
+						aead_hmac_sha256_cbc_aes_256);
+				}
+				encr_cfg = pdev->reg.encr_cfg_aes_cbc_256;
+			} else {
+				return -EINVAL;
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+
+		enciv_in_word = 4;
+
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	key_reg = key_size/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+			&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	if (mode != QCE_MODE_ECB) {
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+			&pcl_info->encr_cntr_iv);
+		for (i = 1; i < enciv_in_word; i++)
+			qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	}
+
+	if (sha1)
+		iv_reg = 5;
+	else
+		iv_reg = 8;
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+				&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+				0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	key_reg = SHA_HMAC_KEY_SIZE/sizeof(uint32_t);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+			 &pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_AUTH_KEY0_REG + i*sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+			&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+			&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+			&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+			&pcl_info->encr_seg_start);
+
+	if (sha1)
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+				pdev->reg.auth_cfg_aead_sha1_hmac,
+				&pcl_info->auth_seg_cfg);
+	else
+		qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+				pdev->reg.auth_cfg_aead_sha256_hmac,
+				&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+			&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+			&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+	return 0;
+}
+
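+/*
+ * Build the command list for AES-CCM AEAD. CCM drives both engines
+ * with the same key (key_reg words are programmed into both the auth
+ * and encr key registers below), plus the nonce words and the
+ * internal CCM counter registers.
+ */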
+static int _setup_aead_ccm_cmdlistptrs(struct qce_device *pdev, int cri_index,
+				unsigned char **pvaddr, bool key_128)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr = &pdev->ce_request_info
+						[cri_index].ce_sps.cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t key_reg = 0;
+
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to aead operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	if (key_128) {
+		cmdlistptr->aead_aes_128_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_128_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_128;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_128;
+		key_reg = 4;
+	} else {
+		cmdlistptr->aead_aes_256_ccm.cmdlist =
+						(uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->aead_aes_256_ccm);
+
+		auth_cfg = pdev->reg.auth_cfg_aes_ccm_256;
+		encr_cfg = pdev->reg.encr_cfg_aes_ccm_256;
+		key_reg = 8;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+									NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG,
+					encr_cfg, &pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG,
+				(uint32_t)0xffffffff, &pcl_info->encr_mask);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG0,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG1,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR_MASK_REG2,
+				(uint32_t)0xffffffff, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+	/* reset auth iv, bytecount and key registers */
+	for (i = 0; i < 8; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG,
+					0, NULL);
+	for (i = 0; i < 16; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set auth key */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_KEY0_REG, 0,
+							&pcl_info->auth_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	/* set NONCE info */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_INFO_NONCE0_REG, 0,
+						&pcl_info->auth_nonce_info);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_INFO_NONCE0_REG +
+				i * sizeof(uint32_t)), 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						&pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_CNTR0_IV0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_CCM_INT_CNTR0_REG, 0,
+						&pcl_info->encr_ccm_cntr_iv);
+	for (i = 1; i < 4; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+			(CRYPTO_ENCR_CCM_INT_CNTR0_REG + i * sizeof(uint32_t)),
+			0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
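+/*
+ * Build the command list for the OTA f8 (confidentiality) algorithm,
+ * i.e. Kasumi or Snow3G stream ciphering.
+ */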
+static int _setup_f8_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t encr_cfg = 0;
+	uint32_t key_reg = 4;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to f8 cipher algorithm defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f8_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_kasumi);
+		encr_cfg = pdev->reg.encr_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f8_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f8_snow3g);
+		encr_cfg = pdev->reg.encr_cfg_snow3g;
+		break;
+	}
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, encr_cfg,
+						&pcl_info->encr_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0,
+						&pcl_info->encr_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
+						&pcl_info->encr_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG, 0,
+						&pcl_info->auth_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
+						0, &pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
+						0, &pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_KEY0_REG, 0,
+						 &pcl_info->encr_key);
+	for (i = 1; i < key_reg; i++)
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_ENCR_KEY0_REG + i * sizeof(uint32_t)),
+				0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR0_IV0_REG, 0,
+						&pcl_info->encr_cntr_iv);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
+								NULL);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
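+/*
+ * Build the command list for the OTA f9 (integrity) algorithm, which
+ * produces a MAC over the input using Kasumi or Snow3G.
+ */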
+static int _setup_f9_cmdlistptrs(struct qce_device *pdev, int cri_index,
+	unsigned char **pvaddr, enum qce_ota_algo_enum alg)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start;
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+	int i = 0;
+	uint32_t auth_cfg = 0;
+	uint32_t iv_reg = 0;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr_start = (uintptr_t)(*pvaddr);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to authentication operations
+	 * defined in ce_cmdlistptrs_ops structure.
+	 */
+	switch (alg) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistptr->f9_kasumi.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_kasumi);
+		auth_cfg = pdev->reg.auth_cfg_kasumi;
+		break;
+
+	case QCE_OTA_ALGO_SNOW3G:
+	default:
+		cmdlistptr->f9_snow3g.cmdlist = (uintptr_t)ce_vaddr;
+		pcl_info = &(cmdlistptr->f9_snow3g);
+		auth_cfg = pdev->reg.auth_cfg_snow3g;
+		break;
+	}
+
+	/* clear status register */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
+							0, NULL);
+	/* set config to big endian */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+			pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
+
+	iv_reg = 5;
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_SEG_SIZE_REG, 0,
+						&pcl_info->seg_size);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0,
+						&pcl_info->encr_seg_cfg);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
+					auth_cfg, &pcl_info->auth_seg_cfg);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG, 0,
+						&pcl_info->auth_seg_size);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG, 0,
+						&pcl_info->auth_seg_start);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_IV0_REG, 0,
+							&pcl_info->auth_iv);
+	for (i = 1; i < iv_reg; i++) {
+		qce_add_cmd_element(pdev, &ce_vaddr,
+				(CRYPTO_AUTH_IV0_REG + i*sizeof(uint32_t)),
+				0, NULL);
+	}
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT0_REG,
+					0, &pcl_info->auth_bytecount);
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_BYTECNT1_REG, 0, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					pdev->reg.crypto_cfg_le, NULL);
+
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_GOPROC_REG,
+			((1 << CRYPTO_GO) | (1 << CRYPTO_RESULTS_DUMP) |
+			(1 << CRYPTO_CLR_CNTXT)), &pcl_info->go_proc);
+
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
+static int _setup_unlock_pipe_cmdlistptrs(struct qce_device *pdev,
+		int cri_index, unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr;
+	uintptr_t ce_vaddr_start = (uintptr_t)(*pvaddr);
+	struct qce_cmdlistptr_ops *cmdlistptr;
+	struct qce_cmdlist_info *pcl_info = NULL;
+
+	cmdlistptr = &pdev->ce_request_info[cri_index].ce_sps.cmdlistptr;
+	*pvaddr = (unsigned char *)ALIGN(((uintptr_t)(*pvaddr)),
+					pdev->ce_bam_info.ce_burst_size);
+	ce_vaddr = (struct sps_command_element *)(*pvaddr);
+	cmdlistptr->unlock_all_pipes.cmdlist = (uintptr_t)ce_vaddr;
+	pcl_info = &(cmdlistptr->unlock_all_pipes);
+
+	/*
+	 * Designate chunks of the allocated memory to command list
+	 * to unlock pipes.
+	 */
+	qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
+					CRYPTO_CONFIG_RESET, NULL);
+	pcl_info->size = (uintptr_t)ce_vaddr - (uintptr_t)ce_vaddr_start;
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	return 0;
+}
+
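+/*
+ * Carve one request's worth of command lists out of the coherent
+ * buffer: every cipher/auth/aead/ota variant gets its own pre-built
+ * list, so per-request setup only has to patch register values.
+ */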
+static int qce_setup_cmdlistptrs(struct qce_device *pdev, int cri_index,
+					unsigned char **pvaddr)
+{
+	struct sps_command_element *ce_vaddr =
+				(struct sps_command_element *)(*pvaddr);
+	/*
+	 * Designate chunks of the allocated memory to various
+	 * command list pointers related to operations defined
+	 * in ce_cmdlistptrs_ops structure.
+	 */
+	ce_vaddr =
+		(struct sps_command_element *)ALIGN(((uintptr_t) ce_vaddr),
+					pdev->ce_bam_info.ce_burst_size);
+	*pvaddr = (unsigned char *) ce_vaddr;
+
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								true);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CBC,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_CTR,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_ECB,
+								false);
+	_setup_cipher_aes_cmdlistptrs(pdev, cri_index, pvaddr, QCE_MODE_XTS,
+								false);
+
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+								false);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								true);
+	_setup_cipher_des_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA1_HMAC,
+								false);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_SHA256_HMAC,
+								false);
+
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								true);
+	_setup_auth_cmdlistptrs(pdev, cri_index, pvaddr, QCE_HASH_AES_CMAC,
+								false);
+
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, true);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_DES,
+					QCE_MODE_CBC, DES_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_3DES,
+					QCE_MODE_CBC, DES3_EDE_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES128_KEY_SIZE, false);
+	_setup_aead_cmdlistptrs(pdev, cri_index, pvaddr, CIPHER_ALG_AES,
+					QCE_MODE_CBC, AES256_KEY_SIZE, false);
+
+	_setup_cipher_null_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, true);
+	_setup_aead_ccm_cmdlistptrs(pdev, cri_index, pvaddr, false);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f8_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_KASUMI);
+	_setup_f9_cmdlistptrs(pdev, cri_index, pvaddr, QCE_OTA_ALGO_SNOW3G);
+	_setup_unlock_pipe_cmdlistptrs(pdev, cri_index, pvaddr);
+
+	return 0;
+}
+
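+/*
+ * Lay out the coherent memory for each of the MAX_QCE_ALLOC_BAM_REQ
+ * request slots: iovec space for the in/out transfers, the command
+ * lists (when command descriptors are supported), two result dump
+ * areas, and an ignore buffer used for pass-through data.
+ */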
+static int qce_setup_ce_sps_data(struct qce_device *pce_dev)
+{
+	unsigned char *vaddr;
+	int i;
+	unsigned char *iovec_vaddr;
+	int iovec_memsize;
+
+	vaddr = pce_dev->coh_vmem;
+	vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+					pce_dev->ce_bam_info.ce_burst_size);
+	iovec_vaddr = pce_dev->iovec_vmem;
+	iovec_memsize = pce_dev->iovec_memsize;
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++) {
+		/* Allow for 256 descriptor (cmd and data) entries per pipe */
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.in_transfer.iovec_phys =
+			virt_to_phys(pce_dev->ce_request_info[i].
+				ce_sps.in_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec =
+				(struct sps_iovec *)iovec_vaddr;
+		pce_dev->ce_request_info[i].ce_sps.out_transfer.iovec_phys =
+			virt_to_phys(pce_dev->ce_request_info[i].
+				ce_sps.out_transfer.iovec);
+		iovec_vaddr += TOTAL_IOVEC_SPACE_PER_PIPE;
+		iovec_memsize -= TOTAL_IOVEC_SPACE_PER_PIPE;
+		if (pce_dev->support_cmd_dscr)
+			qce_setup_cmdlistptrs(pce_dev, i, &vaddr);
+		vaddr = (unsigned char *)ALIGN(((uintptr_t)vaddr),
+				pce_dev->ce_bam_info.ce_burst_size);
+		pce_dev->ce_request_info[i].ce_sps.result_dump =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null =
+				(uintptr_t)vaddr;
+		pce_dev->ce_request_info[i].ce_sps.result_dump_null_phy =
+				GET_PHYS_ADDR((uintptr_t)vaddr);
+		pce_dev->ce_request_info[i].ce_sps.result_null =
+				(struct ce_result_dump_format *)vaddr;
+		vaddr += CRYPTO_RESULT_DUMP_SIZE;
+
+		pce_dev->ce_request_info[i].ce_sps.ignore_buffer =
+				(uintptr_t)vaddr;
+		vaddr += pce_dev->ce_bam_info.ce_burst_size * 2;
+	}
+	if ((vaddr - pce_dev->coh_vmem) > pce_dev->memsize ||
+							iovec_memsize < 0)
+		panic("qce50: Not enough coherent memory. Allocated %x, need %lx\n",
+				pce_dev->memsize, (uintptr_t)vaddr -
+				(uintptr_t)pce_dev->coh_vmem);
+	return 0;
+}
+
+static int qce_init_ce_cfg_val(struct qce_device *pce_dev)
+{
+	uint32_t beats = (pce_dev->ce_bam_info.ce_burst_size >> 3) - 1;
+	uint32_t pipe_pair = pce_dev->ce_bam_info.pipe_pair_index;
+
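+	/*
+	 * beats is the burst size expressed in 8-byte beats, minus one:
+	 * e.g. a 64-byte burst gives (64 >> 3) - 1 = 7. The LE config
+	 * below differs from the BE one only by the endianness mask.
+	 */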
+	pce_dev->reg.crypto_cfg_be = (beats << CRYPTO_REQ_SIZE) |
+		BIT(CRYPTO_MASK_DOUT_INTR) | BIT(CRYPTO_MASK_DIN_INTR) |
+		BIT(CRYPTO_MASK_OP_DONE_INTR) | (0 << CRYPTO_HIGH_SPD_EN_N) |
+		(pipe_pair << CRYPTO_PIPE_SET_SELECT);
+
+	pce_dev->reg.crypto_cfg_le =
+		(pce_dev->reg.crypto_cfg_be | CRYPTO_LITTLE_ENDIAN_MASK);
+
+	/* Initialize encr_cfg register for AES alg */
+	pce_dev->reg.encr_cfg_aes_cbc_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_cbc_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ctr_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_xts_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ecb_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_aes_ccm_128 =
+		(CRYPTO_ENCR_KEY_SZ_AES128 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE)|
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	pce_dev->reg.encr_cfg_aes_ccm_256 =
+		(CRYPTO_ENCR_KEY_SZ_AES256 << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE) |
+		(CRYPTO_LAST_CCM_XFR << CRYPTO_LAST_CCM);
+
+	/* Initialize encr_cfg register for DES alg */
+	pce_dev->reg.encr_cfg_des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_ecb =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE);
+
+	pce_dev->reg.encr_cfg_3des_cbc =
+		(CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) |
+		(CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG) |
+		(CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE);
+
+	/* Initialize encr_cfg register for kasumi/snow3g alg */
+	pce_dev->reg.encr_cfg_kasumi =
+		(CRYPTO_ENCR_ALG_KASUMI << CRYPTO_ENCR_ALG);
+
+	pce_dev->reg.encr_cfg_snow3g =
+		(CRYPTO_ENCR_ALG_SNOW_3G << CRYPTO_ENCR_ALG);
+
+	/* Initialize auth_cfg register for CMAC alg */
+	pce_dev->reg.auth_cfg_cmac_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE);
+
+	pce_dev->reg.auth_cfg_cmac_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_ENUM_16_BYTES << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE);
+
+	/* Initialize auth_cfg register for HMAC alg */
+	pce_dev->reg.auth_cfg_hmac_sha1 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_hmac_sha256 =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for SHA1/256 alg */
+	pce_dev->reg.auth_cfg_sha1 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	pce_dev->reg.auth_cfg_sha256 =
+		(CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS);
+
+	/* Initialize auth_cfg register for AEAD alg */
+	pce_dev->reg.auth_cfg_aead_sha1_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+	pce_dev->reg.auth_cfg_aead_sha256_hmac =
+		(CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE) |
+		(CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG) |
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST);
+
+	pce_dev->reg.auth_cfg_aes_ccm_128 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES128 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_128 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	pce_dev->reg.auth_cfg_aes_ccm_256 =
+		(1 << CRYPTO_LAST) | (1 << CRYPTO_FIRST) |
+		(CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE)|
+		(CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG) |
+		(CRYPTO_AUTH_KEY_SZ_AES256 << CRYPTO_AUTH_KEY_SIZE) |
+		((MAX_NONCE/sizeof(uint32_t)) << CRYPTO_AUTH_NONCE_NUM_WORDS);
+	pce_dev->reg.auth_cfg_aes_ccm_256 &= ~(1 << CRYPTO_USE_HW_KEY_AUTH);
+
+	/* Initialize auth_cfg register for kasumi/snow3g */
+	pce_dev->reg.auth_cfg_kasumi =
+			(CRYPTO_AUTH_ALG_KASUMI << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+	pce_dev->reg.auth_cfg_snow3g =
+			(CRYPTO_AUTH_ALG_SNOW3G << CRYPTO_AUTH_ALG) |
+				BIT(CRYPTO_FIRST) | BIT(CRYPTO_LAST);
+	return 0;
+}
+
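+/*
+ * CCM MAC-status workaround (as the no_ccm_mac_status_get_around flag
+ * suggests): on affected targets a null-cipher command and a dummy
+ * burst-sized transfer are queued around a CCM decrypt so that the
+ * MAC status can be collected into the null result dump.
+ */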
+static void _qce_ccm_get_around_input(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct qce_cmdlist_info *cmdlistinfo;
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		cmdlistinfo = &pce_sps_data->cmdlistptr.cipher_null;
+		_qce_sps_add_cmd(pce_dev, 0, cmdlistinfo,
+				&pce_sps_data->in_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT | SPS_IOVEC_FLAG_NWD);
+	}
+}
+
+static void _qce_ccm_get_around_output(struct qce_device *pce_dev,
+	struct ce_request_info *preq_info, enum qce_cipher_dir_enum dir)
+{
+	struct ce_sps_data *pce_sps_data;
+
+	pce_sps_data = &preq_info->ce_sps;
+
+	if ((dir == QCE_DECRYPT) && pce_dev->no_get_around &&
+			!(pce_dev->no_ccm_mac_status_get_around)) {
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->out_transfer);
+		_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump_null),
+			CRYPTO_RESULT_DUMP_SIZE, &pce_sps_data->out_transfer);
+	}
+}
+
+/* QCE_DUMMY_REQ */
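+/*
+ * The dummy request runs a NULL sha request on the reserved
+ * DUMMY_REQ_INDEX slot; qce_process_sha_req() substitutes
+ * pce_dev->dummyreq.sreq when it is called with a NULL sreq.
+ */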
+static void qce_dummy_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	if (!cookie)
+		pr_err("invalid cookie\n");
+}
+
+static int qce_dummy_req(struct qce_device *pce_dev)
+{
+	int ret = 0;
+
+	if (xchg(&pce_dev->ce_request_info[DUMMY_REQ_INDEX].in_use, true))
+		return -EBUSY;
+	ret = qce_process_sha_req(pce_dev, NULL);
+	pce_dev->qce_stats.no_of_dummy_reqs++;
+	return ret;
+}
+
+static int select_mode(struct qce_device *pce_dev,
+		struct ce_request_info *preq_info)
+{
+	struct ce_sps_data *pce_sps_data = &preq_info->ce_sps;
+	unsigned int no_of_queued_req;
+	unsigned int cadence;
+
+	if (!pce_dev->no_get_around) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		return 0;
+	}
+
+	/*
+	 * claim ownership of device
+	 */
+again:
+	if (cmpxchg(&pce_dev->owner, QCE_OWNER_NONE, QCE_OWNER_CLIENT)
+							!= QCE_OWNER_NONE) {
+		ndelay(40);
+		goto again;
+	}
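+	/*
+	 * Interrupt coalescing: in interrupt mode every request raises
+	 * an interrupt until the queue depth reaches MAX_BUNCH_MODE_REQ,
+	 * at which point the driver switches to bunch mode. In bunch
+	 * mode an interrupt is requested only every "cadence" requests,
+	 * where the cadence scales with request length up to
+	 * SET_INTR_AT_REQ.
+	 */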
+	no_of_queued_req = atomic_inc_return(&pce_dev->no_of_queued_req);
+	if (pce_dev->mode == IN_INTERRUPT_MODE) {
+		if (no_of_queued_req >= MAX_BUNCH_MODE_REQ) {
+			pce_dev->mode = IN_BUNCH_MODE;
+			pr_debug("pcedev %d mode switch to BUNCH\n",
+					pce_dev->dev_no);
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 1);
+			atomic_set(&pce_dev->last_intr_seq, 1);
+			mod_timer(&(pce_dev->timer),
+					(jiffies + DELAY_IN_JIFFIES));
+		} else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+		}
+	} else {
+		pce_dev->intr_cadence++;
+		cadence = (preq_info->req_len >> 7) + 1;
+		if (cadence > SET_INTR_AT_REQ)
+			cadence = SET_INTR_AT_REQ;
+		if (pce_dev->intr_cadence < cadence || ((pce_dev->intr_cadence
+					== cadence) && pce_dev->cadence_flag))
+			atomic_inc(&pce_dev->bunch_cmd_seq);
+		else {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+					SPS_IOVEC_FLAG_INT);
+			pce_dev->intr_cadence = 0;
+			atomic_set(&pce_dev->bunch_cmd_seq, 0);
+			atomic_set(&pce_dev->last_intr_seq, 0);
+			pce_dev->cadence_flag = ~pce_dev->cadence_flag;
+		}
+	}
+
+	return 0;
+}
+
+static int _qce_aead_ccm_req(void *handle, struct qce_req *q_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct aead_request *areq = (struct aead_request *) q_req->areq;
+	uint32_t authsize = q_req->authsize;
+	uint32_t totallen_in, out_len;
+	uint32_t hw_pad_out = 0;
+	int ce_burst_size;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	ce_burst_size = pce_dev->ce_bam_info.ce_burst_size;
+	totallen_in = areq->cryptlen + q_req->assoclen;
+	if (q_req->dir == QCE_ENCRYPT) {
+		q_req->cryptlen = areq->cryptlen;
+		out_len = areq->cryptlen + authsize;
+		hw_pad_out = ALIGN(authsize, ce_burst_size) - authsize;
+	} else {
+		q_req->cryptlen = areq->cryptlen - authsize;
+		out_len = q_req->cryptlen;
+		hw_pad_out = authsize;
+	}
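+	/*
+	 * For encryption the MAC (authsize bytes) is appended to the
+	 * ciphertext and padded up to the burst size: e.g. authsize 16
+	 * with a 64-byte burst gives hw_pad_out = 48. For decryption
+	 * the trailing MAC itself is consumed as pad.
+	 */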
+
+	/*
+	 * For crypto 5.0, which has a burst size alignment requirement
+	 * for data descriptors, the agent above (qcrypto) prepares the
+	 * src scatter list with memory starting with associated data,
+	 * followed by the data stream to be ciphered.
+	 * The destination scatter list points to the same data area as
+	 * the source.
+	 */
+	if (pce_dev->ce_bam_info.minor_version == 0)
+		preq_info->src_nents = count_sg(areq->src, totallen_in);
+	else
+		preq_info->src_nents = count_sg(areq->src, areq->cryptlen +
+							areq->assoclen);
+
+	if (q_req->assoclen) {
+		preq_info->assoc_nents = count_sg(q_req->asg, q_req->assoclen);
+
+		/* formatted associated data input */
+		qce_dma_map_sg(pce_dev->pdev, q_req->asg,
+			preq_info->assoc_nents, DMA_TO_DEVICE);
+		preq_info->asg = q_req->asg;
+	} else {
+		preq_info->assoc_nents = 0;
+		preq_info->asg = NULL;
+	}
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher + mac output for encryption */
+	if (areq->src != areq->dst) {
+		/*
+		 * The destination scatter list points to the same data
+		 * area as the source. Note, for minor version 0 the
+		 * associated data is passed through at the beginning of
+		 * the destination area.
+		 */
+		preq_info->dst_nents = count_sg(areq->dst,
+						out_len + areq->assoclen);
+
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev, req_info,
+								 q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						q_req->alg, q_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_cipher(pce_dev, q_req, totallen_in,
+					q_req->assoclen, cmdlistinfo);
+	} else {
+		/* set up crypto device */
+		rc = _ce_setup_cipher_direct(pce_dev, q_req, totallen_in,
+					q_req->assoclen);
+	}
+
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = q_req->mode;
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen_in;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		rc = -EINVAL;
+		goto bad;
+	} else {
+		if (q_req->assoclen && (_qce_sps_add_sg_data(
+			pce_dev, q_req->asg, q_req->assoclen,
+					 &pce_sps_data->in_transfer)))
+			goto bad;
+		if (_qce_sps_add_sg_data_off(pce_dev, areq->src, areq->cryptlen,
+					areq->assoclen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		_qce_ccm_get_around_input(pce_dev, preq_info, q_req->dir);
+
+		if (pce_dev->no_get_around)
+			_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+
+		/* Pass through to ignore associated data */
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				q_req->assoclen,
+				&pce_sps_data->out_transfer))
+			goto bad;
+		if (_qce_sps_add_sg_data_off(pce_dev, areq->dst, out_len,
+					areq->assoclen,
+					&pce_sps_data->out_transfer))
+			goto bad;
+		/* Pass through to ignore hw_pad (padding of the MAC data) */
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+				hw_pad_out, &pce_sps_data->out_transfer))
+			goto bad;
+		if (pce_dev->no_get_around ||
+				totallen_in <= SPS_MAX_PKT_SIZE) {
+			if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+
+		_qce_ccm_get_around_output(pce_dev, preq_info, q_req->dir);
+
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->assoc_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, q_req->asg,
+				preq_info->assoc_nents, DMA_TO_DEVICE);
+	}
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	}
+	if (areq->src != areq->dst) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+
+static int _qce_suspend(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+	sps_disconnect(sps_pipe_info);
+
+	qce_disable_clk(pce_dev);
+	return 0;
+}
+
+static int _qce_resume(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct sps_pipe *sps_pipe_info;
+	struct sps_connect *sps_connect_info;
+	int rc;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	qce_enable_clk(pce_dev);
+
+	sps_pipe_info = pce_dev->ce_bam_info.consumer.pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.consumer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc) {
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+		return rc;
+	}
+	sps_pipe_info = pce_dev->ce_bam_info.producer.pipe;
+	sps_connect_info = &pce_dev->ce_bam_info.producer.connect;
+	memset(sps_connect_info->desc.base, 0x00, sps_connect_info->desc.size);
+	rc = sps_connect(sps_pipe_info, sps_connect_info);
+	if (rc)
+		pr_err("sps_connect() fail pipe_handle=0x%lx, rc = %d\n",
+			(uintptr_t)sps_pipe_info, rc);
+
+	rc = sps_register_event(sps_pipe_info,
+					&pce_dev->ce_bam_info.producer.event);
+	if (rc)
+		pr_err("Producer callback registration failed rc = %d\n", rc);
+
+	qce_disable_clk(pce_dev);
+	return rc;
+}
+
+struct qce_pm_table qce_pm_table = {_qce_suspend, _qce_resume};
+EXPORT_SYMBOL(qce_pm_table);
+
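+/*
+ * Illustrative caller sketch (field names as used below; the values
+ * are an example only, not a prescribed configuration):
+ *
+ *	struct qce_req q = {0};
+ *
+ *	q.alg = CIPHER_ALG_AES;
+ *	q.mode = QCE_MODE_CBC;
+ *	q.dir = QCE_ENCRYPT;
+ *	q.authsize = 20;
+ *	q.areq = aead_req;
+ *	q.qce_cb = client_done_cb;
+ *	rc = qce_aead_req(handle, &q);
+ *
+ * QCE_MODE_CCM requests are diverted to _qce_aead_ccm_req(); all
+ * other modes run the HMAC-SHA1/SHA256 AEAD path in this function.
+ */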
+int qce_aead_req(void *handle, struct qce_req *q_req)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	struct aead_request *areq;
+	uint32_t authsize;
+	struct crypto_aead *aead;
+	uint32_t ivsize;
+	uint32_t totallen;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	if (q_req->mode == QCE_MODE_CCM)
+		return _qce_aead_ccm_req(handle, q_req);
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	areq = (struct aead_request *) q_req->areq;
+	aead = crypto_aead_reqtfm(areq);
+	ivsize = crypto_aead_ivsize(aead);
+	q_req->ivsize = ivsize;
+	authsize = q_req->authsize;
+	if (q_req->dir == QCE_ENCRYPT)
+		q_req->cryptlen = areq->cryptlen;
+	else
+		q_req->cryptlen = areq->cryptlen - authsize;
+
+	if (q_req->cryptlen > UINT_MAX - areq->assoclen) {
+		pr_err("Integer overflow on total aead req length.\n");
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	totallen = q_req->cryptlen + areq->assoclen;
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_aead_cmdlistinfo(pce_dev,
+							req_info, q_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported aead ciphering algorithm %d, mode %d, ciphering key length %d, auth digest size %d\n",
+				q_req->alg, q_req->mode, q_req->encklen,
+					q_req->authsize);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		/* set up crypto device */
+		rc = _ce_setup_aead(pce_dev, q_req, totallen,
+					areq->assoclen, cmdlistinfo);
+		if (rc < 0) {
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+	}
+
+	/*
+	 * For crypto 5.0, which has a burst size alignment requirement
+	 * for data descriptors, the agent above (qcrypto) prepares the
+	 * src scatter list with memory starting with associated data,
+	 * followed by the iv, and then the data stream to be ciphered.
+	 */
+	preq_info->src_nents = count_sg(areq->src, totallen);
+
+	/* cipher input */
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+			(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output  for encryption    */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, totallen);
+
+		qce_dma_map_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	}
+
+	/* setup for callback, and issue command to bam */
+	preq_info->areq = q_req->areq;
+	preq_info->qce_cb = q_req->qce_cb;
+	preq_info->dir = q_req->dir;
+	preq_info->asg = NULL;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_AEAD;
+	preq_info->req_len = totallen;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr) {
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	} else {
+		rc = _ce_setup_aead_direct(pce_dev, q_req, totallen,
+					areq->assoclen);
+		if (rc)
+			goto bad;
+	}
+
+	preq_info->mode = q_req->mode;
+
+	if (pce_dev->ce_bam_info.minor_version == 0) {
+		if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+				&pce_sps_data->out_transfer))
+			goto bad;
+		if (totallen > SPS_MAX_PKT_SIZE) {
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		} else {
+			if (_qce_sps_add_data(GET_PHYS_ADDR(
+					pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					&pce_sps_data->out_transfer))
+				goto bad;
+			_qce_set_flag(&pce_sps_data->out_transfer,
+							SPS_IOVEC_FLAG_INT);
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		}
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		if (_qce_sps_add_sg_data(pce_dev, areq->src, totallen,
+					&pce_sps_data->in_transfer))
+			goto bad;
+		_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+		if (pce_dev->no_get_around)
+			_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+				&pce_sps_data->cmdlistptr.unlock_all_pipes,
+				&pce_sps_data->in_transfer);
+
+		if (_qce_sps_add_sg_data(pce_dev, areq->dst, totallen,
+					&pce_sps_data->out_transfer))
+			goto bad;
+
+		if (pce_dev->no_get_around || totallen <= SPS_MAX_PKT_SIZE) {
+			if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+				goto bad;
+			pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		} else {
+			pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+		}
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+
+bad:
+	if (preq_info->src_nents)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+				(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+								DMA_TO_DEVICE);
+	if (areq->src != areq->dst)
+		qce_dma_unmap_sg(pce_dev->pdev, areq->dst, preq_info->dst_nents,
+				DMA_FROM_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_aead_req);
+
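+/*
+ * Queue a plain (non-AEAD) cipher request: map src/dst, build or
+ * patch the command list, and hand the descriptors to the BAM. For
+ * CBC decryption on minor version 0 hardware the last cipher block
+ * of the source is saved first (dec_iv), presumably so a later
+ * request can chain from it after in-place decryption.
+ */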
+int qce_ablk_cipher_req(void *handle, struct qce_req *c_req)
+{
+	int rc = 0;
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	struct ablkcipher_request *areq = (struct ablkcipher_request *)
+						c_req->areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = 0;
+	preq_info->dst_nents = 0;
+
+	/* cipher input */
+	preq_info->src_nents = count_sg(areq->src, areq->nbytes);
+
+	qce_dma_map_sg(pce_dev->pdev, areq->src, preq_info->src_nents,
+		(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
+							DMA_TO_DEVICE);
+	/* cipher output */
+	if (areq->src != areq->dst) {
+		preq_info->dst_nents = count_sg(areq->dst, areq->nbytes);
+		qce_dma_map_sg(pce_dev->pdev, areq->dst,
+				preq_info->dst_nents, DMA_FROM_DEVICE);
+	} else {
+		preq_info->dst_nents = preq_info->src_nents;
+	}
+	preq_info->dir = c_req->dir;
+	if  ((pce_dev->ce_bam_info.minor_version == 0) &&
+			(preq_info->dir == QCE_DECRYPT) &&
+			(c_req->mode == QCE_MODE_CBC)) {
+		memcpy(preq_info->dec_iv, (unsigned char *)
+			sg_virt(areq->src) + areq->src->length - 16,
+			NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE);
+	}
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_cipher_cmdlistinfo(pce_dev,
+							req_info, c_req);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported cipher algorithm %d, mode %d\n",
+						c_req->alg, c_req->mode);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0,
+							cmdlistinfo);
+	} else {
+		rc = _ce_setup_cipher_direct(pce_dev, c_req, areq->nbytes, 0);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->mode = c_req->mode;
+
+	/* setup for client callback, and issue command to BAM */
+	preq_info->areq = areq;
+	preq_info->qce_cb = c_req->qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_CIPHERING;
+	preq_info->req_len = areq->nbytes;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+					&pce_sps_data->in_transfer))
+		goto bad;
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	if (pce_dev->no_get_around)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+
+	if (_qce_sps_add_sg_data(pce_dev, areq->dst, areq->nbytes,
+					&pce_sps_data->out_transfer))
+		goto bad;
+	if (pce_dev->no_get_around || areq->nbytes <= SPS_MAX_PKT_SIZE) {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_COMP;
+		if (_qce_sps_add_data(
+				GET_PHYS_ADDR(pce_sps_data->result_dump),
+				CRYPTO_RESULT_DUMP_SIZE,
+				&pce_sps_data->out_transfer))
+			goto bad;
+	} else {
+		pce_sps_data->producer_state = QCE_PIPE_STATE_IDLE;
+	}
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+
+	return 0;
+bad:
+	if (areq->src != areq->dst) {
+		if (preq_info->dst_nents) {
+			qce_dma_unmap_sg(pce_dev->pdev, areq->dst,
+				preq_info->dst_nents, DMA_FROM_DEVICE);
+		}
+	}
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, areq->src,
+				preq_info->src_nents,
+				(areq->src == areq->dst) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_ablk_cipher_req);
+
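+/*
+ * Queue a hash/HMAC request. A NULL sreq selects the internal dummy
+ * request on DUMMY_REQ_INDEX. Zero-length input is padded with the
+ * ignore buffer, since zero-length transfers do not work on bam-ndp
+ * (see the comment in the body).
+ */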
+int qce_process_sha_req(void *handle, struct qce_sha_req *sreq)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+
+	struct ahash_request *areq;
+	struct qce_cmdlist_info *cmdlistinfo = NULL;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+	bool is_dummy = false;
+
+	if (!sreq) {
+		sreq = &(pce_dev->dummyreq.sreq);
+		req_info = DUMMY_REQ_INDEX;
+		is_dummy = true;
+	} else {
+		req_info = qce_alloc_req_info(pce_dev);
+		if (req_info < 0)
+			return -EBUSY;
+	}
+
+	areq = (struct ahash_request *)sreq->areq;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	preq_info->src_nents = count_sg(sreq->src, sreq->size);
+	qce_dma_map_sg(pce_dev->pdev, sreq->src, preq_info->src_nents,
+							DMA_TO_DEVICE);
+
+	if (pce_dev->support_cmd_dscr) {
+		cmdlistinfo = _ce_get_hash_cmdlistinfo(pce_dev, req_info, sreq);
+		if (cmdlistinfo == NULL) {
+			pr_err("Unsupported hash algorithm %d\n", sreq->alg);
+			qce_free_req_info(pce_dev, req_info, false);
+			return -EINVAL;
+		}
+		rc = _ce_setup_hash(pce_dev, sreq, cmdlistinfo);
+	} else {
+		rc = _ce_setup_hash_direct(pce_dev, sreq);
+	}
+	if (rc < 0)
+		goto bad;
+
+	preq_info->areq = areq;
+	preq_info->qce_cb = sreq->qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_HASHING;
+	preq_info->req_len = sreq->size;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	if (_qce_sps_add_sg_data(pce_dev, areq->src, areq->nbytes,
+						 &pce_sps_data->in_transfer))
+		goto bad;
+
+	/*
+	 * Always ensure there is input data; a zero-length transfer
+	 * (ZLT) does not work for bam-ndp.
+	 */
+	if (!areq->nbytes)
+		_qce_sps_add_data(
+			GET_PHYS_ADDR(pce_sps_data->ignore_buffer),
+			pce_dev->ce_bam_info.ce_burst_size,
+			&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+					SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+	if (pce_dev->no_get_around)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+			&pce_sps_data->in_transfer);
+
+	if (_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer))
+		goto bad;
+
+	if (is_dummy) {
+		_qce_set_flag(&pce_sps_data->out_transfer, SPS_IOVEC_FLAG_INT);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+	} else {
+		select_mode(pce_dev, preq_info);
+		rc = _qce_sps_transfer(pce_dev, req_info);
+		cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	}
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->src_nents) {
+		qce_dma_unmap_sg(pce_dev->pdev, sreq->src,
+				preq_info->src_nents, DMA_TO_DEVICE);
+	}
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_process_sha_req);
+
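+/*
+ * Queue a single-packet OTA f8 (Kasumi/Snow3G) ciphering request.
+ * Key stream mode (data_in == NULL) is rejected, and in-place
+ * operation is detected by data_in == data_out.
+ */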
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	bool key_stream_mode;
+	dma_addr_t dst;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	key_stream_mode = (req->data_in == NULL);
+
+	/* key stream mode is not supported */
+	if (key_stream_mode || (req->bearer >= QCE_OTA_MAX_BEARER)) {
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	/* F8 cipher input */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+					req->data_in, req->data_len,
+					(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out,
+				req->data_len, DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+	preq_info->ota_size = req->data_len;
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, key_stream_mode, 1, 0,
+				 req->data_len);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = req->data_len;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->data_len,
+					&pce_sps_data->in_transfer);
+
+	_qce_set_flag(&pce_sps_data->in_transfer,
+			SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)dst, req->data_len,
+					&pce_sps_data->out_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	if (preq_info->phy_ota_dst != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst,
+				req->data_len, DMA_FROM_DEVICE);
+	if (preq_info->phy_ota_src != 0)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->data_len,
+				(req->data_in == req->data_out) ?
+					DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_req);
+
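+/*
+ * Multi-packet variant of qce_f8_req(): num_pkt packets of data_len
+ * bytes each are mapped as one contiguous buffer and ciphered in a
+ * single transfer, with cipher_start/cipher_size applied per packet
+ * by the F8 setup.
+ */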
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq,
+			void *cookie, qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	uint16_t num_pkt = mreq->num_pkt;
+	uint16_t cipher_start = mreq->cipher_start;
+	uint16_t cipher_size = mreq->cipher_size;
+	struct qce_f8_req *req = &mreq->qce_f8_req;
+	uint32_t total;
+	dma_addr_t dst = 0;
+	int rc = 0;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_request_info *preq_info;
+	struct ce_sps_data *pce_sps_data;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f8_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	total = num_pkt * req->data_len;
+
+	/* F8 cipher input       */
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev,
+				req->data_in, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+
+	/* F8 cipher output      */
+	if (req->data_in != req->data_out) {
+		dst = dma_map_single(pce_dev->pdev, req->data_out, total,
+						DMA_FROM_DEVICE);
+		preq_info->phy_ota_dst = dst;
+	} else {
+		/* in place ciphering */
+		dst = preq_info->phy_ota_src;
+		preq_info->phy_ota_dst = 0;
+	}
+
+	preq_info->ota_size = total;
+
+	/* set up crypto device */
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start,
+			cipher_size, cmdlistinfo);
+	else
+		rc = _ce_f8_setup_direct(pce_dev, req, false, num_pkt,
+			cipher_start, cipher_size);
+	if (rc)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F8;
+	preq_info->req_len = total;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, total,
+					&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data((uint32_t)dst, total,
+					&pce_sps_data->out_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+
+	if (rc == 0)
+		return 0;
+bad:
+	if (preq_info->phy_ota_dst)
+		dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_dst, total,
+				DMA_FROM_DEVICE);
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src, total,
+				(req->data_in == req->data_out) ?
+				DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f8_multi_pkt_req);
+
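+/*
+ * Queue one OTA F9 (integrity) request. The message is DMA mapped for
+ * input only; on the output side only the result dump area is queued,
+ * from which the completion path picks up the computed authentication
+ * value.
+ */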
+int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie,
+			qce_comp_func_ptr_t qce_cb)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc;
+	struct qce_cmdlist_info *cmdlistinfo;
+	int req_info = -1;
+	struct ce_sps_data *pce_sps_data;
+	struct ce_request_info *preq_info;
+
+	req_info = qce_alloc_req_info(pce_dev);
+	if (req_info < 0)
+		return -EBUSY;
+	preq_info = &pce_dev->ce_request_info[req_info];
+	pce_sps_data = &preq_info->ce_sps;
+	switch (req->algorithm) {
+	case QCE_OTA_ALGO_KASUMI:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_kasumi;
+		break;
+	case QCE_OTA_ALGO_SNOW3G:
+		cmdlistinfo = &pce_sps_data->cmdlistptr.f9_snow3g;
+		break;
+	default:
+		qce_free_req_info(pce_dev, req_info, false);
+		return -EINVAL;
+	}
+
+	preq_info->phy_ota_src = dma_map_single(pce_dev->pdev, req->message,
+			req->msize, DMA_TO_DEVICE);
+
+	preq_info->ota_size = req->msize;
+
+	if (pce_dev->support_cmd_dscr)
+		rc = _ce_f9_setup(pce_dev, req, cmdlistinfo);
+	else
+		rc = _ce_f9_setup_direct(pce_dev, req);
+	if (rc < 0)
+		goto bad;
+
+	/* setup for callback, and issue command to sps */
+	preq_info->areq = cookie;
+	preq_info->qce_cb = qce_cb;
+
+	/* setup xfer type for producer callback handling */
+	preq_info->xfer_type = QCE_XFER_F9;
+	preq_info->req_len = req->msize;
+
+	_qce_sps_iovec_count_init(pce_dev, req_info);
+	if (pce_dev->support_cmd_dscr)
+		_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_LOCK, cmdlistinfo,
+					&pce_sps_data->in_transfer);
+	_qce_sps_add_data((uint32_t)preq_info->phy_ota_src, req->msize,
+					&pce_sps_data->in_transfer);
+	_qce_set_flag(&pce_sps_data->in_transfer,
+				SPS_IOVEC_FLAG_EOT|SPS_IOVEC_FLAG_NWD);
+
+	_qce_sps_add_cmd(pce_dev, SPS_IOVEC_FLAG_UNLOCK,
+			&pce_sps_data->cmdlistptr.unlock_all_pipes,
+					&pce_sps_data->in_transfer);
+
+	_qce_sps_add_data(GET_PHYS_ADDR(pce_sps_data->result_dump),
+					CRYPTO_RESULT_DUMP_SIZE,
+					  &pce_sps_data->out_transfer);
+
+	select_mode(pce_dev, preq_info);
+	rc = _qce_sps_transfer(pce_dev, req_info);
+	cmpxchg(&pce_dev->owner, QCE_OWNER_CLIENT, QCE_OWNER_NONE);
+	if (rc)
+		goto bad;
+	return 0;
+bad:
+	dma_unmap_single(pce_dev->pdev, preq_info->phy_ota_src,
+				req->msize, DMA_TO_DEVICE);
+	qce_free_req_info(pce_dev, req_info, false);
+	return rc;
+}
+EXPORT_SYMBOL(qce_f9_req);
+
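+/*
+ * Parse the crypto engine device tree node: optional feature flags
+ * (shared HW, HW key, software fallback algorithms), the mandatory
+ * BAM pipe pair, CE device and CE HW instance values, and the crypto
+ * and BAM register regions plus the BAM IRQ.
+ */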
+static int __qce_get_device_tree_data(struct platform_device *pdev,
+		struct qce_device *pce_dev)
+{
+	struct resource *resource;
+	int rc = 0;
+
+	pce_dev->is_shared = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-shared");
+	pce_dev->support_hw_key = of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,ce-hw-key");
+
+	pce_dev->use_sw_aes_cbc_ecb_ctr_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-cbc-ecb-ctr-algo");
+	pce_dev->use_sw_aead_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aead-algo");
+	pce_dev->use_sw_aes_xts_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-xts-algo");
+	pce_dev->use_sw_ahash_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-ahash-algo");
+	pce_dev->use_sw_hmac_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-hmac-algo");
+	pce_dev->use_sw_aes_ccm_algo =
+				of_property_read_bool((&pdev->dev)->of_node,
+				"qcom,use-sw-aes-ccm-algo");
+	pce_dev->support_clk_mgmt_sus_res = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,clk-mgmt-sus-res");
+	pce_dev->support_only_core_src_clk = of_property_read_bool(
+		(&pdev->dev)->of_node, "qcom,support-core-clk-only");
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-pipe-pair",
+				&pce_dev->ce_bam_info.pipe_pair_index)) {
+		pr_err("Failed to get BAM pipe pair information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-device",
+				&pce_dev->ce_bam_info.ce_device)) {
+		pr_err("Failed to get CE device information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-hw-instance",
+				&pce_dev->ce_bam_info.ce_hw_instance)) {
+		pr_err("Failed to get CE HW instance information.\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,bam-ee",
+				&pce_dev->ce_bam_info.bam_ee)) {
+		pr_info("BAM Apps EE is not defined, setting to default 1\n");
+		pce_dev->ce_bam_info.bam_ee = 1;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&pce_dev->ce_opp_freq_hz)) {
+		pr_info("CE operating frequency is not defined, setting to default 100MHz\n");
+		pce_dev->ce_opp_freq_hz = CE_CLK_100MHZ;
+	}
+	pce_dev->ce_bam_info.dest_pipe_index	=
+			2 * pce_dev->ce_bam_info.pipe_pair_index;
+	pce_dev->ce_bam_info.src_pipe_index	=
+			pce_dev->ce_bam_info.dest_pipe_index + 1;
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-base");
+	if (resource) {
+		pce_dev->phy_iobase = resource->start;
+		pce_dev->iobase = ioremap_nocache(resource->start,
+					resource_size(resource));
+		if (!pce_dev->iobase) {
+			pr_err("Cannot map CRYPTO io memory\n");
+			return -ENOMEM;
+		}
+	} else {
+		pr_err("CRYPTO HW mem unavailable.\n");
+		return -ENODEV;
+	}
+
+	resource = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+							"crypto-bam-base");
+	if (resource) {
+		pce_dev->bam_mem = resource->start;
+		pce_dev->bam_mem_size = resource_size(resource);
+	} else {
+		pr_err("CRYPTO BAM mem unavailable.\n");
+		rc = -ENODEV;
+		goto err_getting_bam_info;
+	}
+
+	resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (resource) {
+		pce_dev->ce_bam_info.bam_irq = resource->start;
+	} else {
+		pr_err("CRYPTO BAM IRQ unavailable.\n");
+		rc = -ENXIO;
+		goto err_dev;
+	}
+	return rc;
+err_dev:
+	if (pce_dev->ce_bam_info.bam_iobase)
+		iounmap(pce_dev->ce_bam_info.bam_iobase);
+
+err_getting_bam_info:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+
+	return rc;
+}
+
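+/*
+ * Acquire the CE clocks and set the core source clock rate. When
+ * "qcom,support-core-clk-only" is set, the core source clock is the
+ * only one used and the core/iface/bus clock handles stay NULL;
+ * otherwise a missing core source clock is tolerated with a warning.
+ */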
+static int __qce_init_clk(struct qce_device *pce_dev)
+{
+	int rc = 0;
+
+	pce_dev->ce_core_src_clk = clk_get(pce_dev->pdev, "core_clk_src");
+	if (!IS_ERR(pce_dev->ce_core_src_clk)) {
+		rc = clk_set_rate(pce_dev->ce_core_src_clk,
+						pce_dev->ce_opp_freq_hz);
+		if (rc) {
+			pr_err("Unable to set the core src clk @%uMHz.\n",
+					pce_dev->ce_opp_freq_hz/CE_CLK_DIV);
+			goto exit_put_core_src_clk;
+		}
+	} else {
+		if (pce_dev->support_only_core_src_clk) {
+			rc = PTR_ERR(pce_dev->ce_core_src_clk);
+			pce_dev->ce_core_src_clk = NULL;
+			pr_err("Unable to get CE core src clk\n");
+			return rc;
+		}
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		pce_dev->ce_core_src_clk = NULL;
+	}
+
+	if (pce_dev->support_only_core_src_clk) {
+		pce_dev->ce_core_clk = NULL;
+		pce_dev->ce_clk = NULL;
+		pce_dev->ce_bus_clk = NULL;
+	} else {
+		pce_dev->ce_core_clk = clk_get(pce_dev->pdev, "core_clk");
+		if (IS_ERR(pce_dev->ce_core_clk)) {
+			rc = PTR_ERR(pce_dev->ce_core_clk);
+			pr_err("Unable to get CE core clk\n");
+			goto exit_put_core_src_clk;
+		}
+		pce_dev->ce_clk = clk_get(pce_dev->pdev, "iface_clk");
+		if (IS_ERR(pce_dev->ce_clk)) {
+			rc = PTR_ERR(pce_dev->ce_clk);
+			pr_err("Unable to get CE interface clk\n");
+			goto exit_put_core_clk;
+		}
+
+		pce_dev->ce_bus_clk = clk_get(pce_dev->pdev, "bus_clk");
+		if (IS_ERR(pce_dev->ce_bus_clk)) {
+			rc = PTR_ERR(pce_dev->ce_bus_clk);
+			pr_err("Unable to get CE BUS interface clk\n");
+			goto exit_put_iface_clk;
+		}
+	}
+	return rc;
+
+exit_put_iface_clk:
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+exit_put_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+exit_put_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+	pr_err("Unable to init CE clks, rc = %d\n", rc);
+	return rc;
+}
+
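+/* Release all clock handles obtained in __qce_init_clk(). */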
+static void __qce_deinit_clk(struct qce_device *pce_dev)
+{
+	if (pce_dev->ce_bus_clk)
+		clk_put(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_put(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_put(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_put(pce_dev->ce_core_src_clk);
+}
+
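+/*
+ * Prepare and enable whichever CE clocks were acquired, unwinding the
+ * already enabled ones if any clock fails to come up.
+ */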
+int qce_enable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+	int rc = 0;
+
+	if (pce_dev->ce_core_src_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_src_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core src clk\n");
+			return rc;
+		}
+	}
+
+	if (pce_dev->support_only_core_src_clk)
+		return rc;
+
+	if (pce_dev->ce_core_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto exit_disable_core_src_clk;
+		}
+	}
+
+	if (pce_dev->ce_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto exit_disable_core_clk;
+		}
+	}
+
+	if (pce_dev->ce_bus_clk) {
+		rc = clk_prepare_enable(pce_dev->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE BUS clk\n");
+			goto exit_disable_ce_clk;
+		}
+	}
+	return rc;
+
+exit_disable_ce_clk:
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+exit_disable_core_clk:
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+exit_disable_core_src_clk:
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+	return rc;
+}
+EXPORT_SYMBOL(qce_enable_clk);
+
+int qce_disable_clk(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+	int rc = 0;
+
+	if (pce_dev->ce_bus_clk)
+		clk_disable_unprepare(pce_dev->ce_bus_clk);
+	if (pce_dev->ce_clk)
+		clk_disable_unprepare(pce_dev->ce_clk);
+	if (pce_dev->ce_core_clk)
+		clk_disable_unprepare(pce_dev->ce_core_clk);
+	if (pce_dev->ce_core_src_clk)
+		clk_disable_unprepare(pce_dev->ce_core_src_clk);
+
+	return rc;
+}
+EXPORT_SYMBOL(qce_disable_clk);
+
+/* Set up the canned SHA-1 request used internally as a dummy request. */
+static int setup_dummy_req(struct qce_device *pce_dev)
+{
+	char *input =
+	"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopqopqrpqrs";
+	int len = DUMMY_REQ_DATA_LEN;
+
+	memcpy(pce_dev->dummyreq_in_buf, input, len);
+	sg_set_buf(&pce_dev->dummyreq.sg, pce_dev->dummyreq_in_buf, len);
+	sg_mark_end(&pce_dev->dummyreq.sg);
+
+	pce_dev->dummyreq.sreq.alg = QCE_HASH_SHA1;
+	pce_dev->dummyreq.sreq.qce_cb = qce_dummy_complete;
+	pce_dev->dummyreq.sreq.src = &pce_dev->dummyreq.sg;
+	pce_dev->dummyreq.sreq.auth_data[0] = 0;
+	pce_dev->dummyreq.sreq.auth_data[1] = 0;
+	pce_dev->dummyreq.sreq.auth_data[2] = 0;
+	pce_dev->dummyreq.sreq.auth_data[3] = 0;
+	pce_dev->dummyreq.sreq.first_blk = 1;
+	pce_dev->dummyreq.sreq.last_blk = 1;
+	pce_dev->dummyreq.sreq.size = len;
+	pce_dev->dummyreq.sreq.areq = &pce_dev->dummyreq.areq;
+	pce_dev->dummyreq.sreq.flags = 0;
+	pce_dev->dummyreq.sreq.authkey = NULL;
+
+	pce_dev->dummyreq.areq.src = pce_dev->dummyreq.sreq.src;
+	pce_dev->dummyreq.areq.nbytes = pce_dev->dummyreq.sreq.size;
+
+	return 0;
+}
+
+/* Crypto engine open: allocate and initialize a qce_device instance. */
+void *qce_open(struct platform_device *pdev, int *rc)
+{
+	struct qce_device *pce_dev;
+	int i;
+	static int pcedev_no = 1;
+
+	pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL);
+	if (!pce_dev) {
+		*rc = -ENOMEM;
+		pr_err("Cannot allocate memory: %d\n", *rc);
+		return NULL;
+	}
+	pce_dev->pdev = &pdev->dev;
+
+	mutex_lock(&qce_iomap_mutex);
+	if (pdev->dev.of_node) {
+		*rc = __qce_get_device_tree_data(pdev, pce_dev);
+		if (*rc)
+			goto err_pce_dev;
+	} else {
+		*rc = -EINVAL;
+		pr_err("Device Node not found.\n");
+		goto err_pce_dev;
+	}
+
+	for (i = 0; i < MAX_QCE_ALLOC_BAM_REQ; i++)
+		pce_dev->ce_request_info[i].in_use = false;
+	pce_dev->ce_request_index = 0;
+
+	pce_dev->memsize = 10 * PAGE_SIZE * MAX_QCE_ALLOC_BAM_REQ;
+	pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev,
+			pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL);
+
+	if (pce_dev->coh_vmem == NULL) {
+		*rc = -ENOMEM;
+		pr_err("Cannot allocate coherent memory for sps data\n");
+		goto err_iobase;
+	}
+
+	pce_dev->iovec_memsize = TOTAL_IOVEC_SPACE_PER_PIPE *
+						MAX_QCE_ALLOC_BAM_REQ * 2;
+	pce_dev->iovec_vmem = kzalloc(pce_dev->iovec_memsize, GFP_KERNEL);
+	if (pce_dev->iovec_vmem == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	pce_dev->dummyreq_in_buf = kzalloc(DUMMY_REQ_DATA_LEN, GFP_KERNEL);
+	if (pce_dev->dummyreq_in_buf == NULL) {
+		*rc = -ENOMEM;
+		goto err_mem;
+	}
+
+	*rc = __qce_init_clk(pce_dev);
+	if (*rc)
+		goto err_mem;
+	*rc = qce_enable_clk(pce_dev);
+	if (*rc)
+		goto err_enable_clk;
+
+	if (_probe_ce_engine(pce_dev)) {
+		*rc = -ENXIO;
+		goto err;
+	}
+	*rc = 0;
+
+	qce_init_ce_cfg_val(pce_dev);
+	*rc  = qce_sps_init(pce_dev);
+	if (*rc)
+		goto err;
+	qce_setup_ce_sps_data(pce_dev);
+	qce_disable_clk(pce_dev);
+	setup_dummy_req(pce_dev);
+	atomic_set(&pce_dev->no_of_queued_req, 0);
+	pce_dev->mode = IN_INTERRUPT_MODE;
+	init_timer(&(pce_dev->timer));
+	pce_dev->timer.function = qce_multireq_timeout;
+	pce_dev->timer.data = (unsigned long)pce_dev;
+	pce_dev->timer.expires = jiffies + DELAY_IN_JIFFIES;
+	pce_dev->intr_cadence = 0;
+	pce_dev->dev_no = pcedev_no;
+	pcedev_no++;
+	pce_dev->owner = QCE_OWNER_NONE;
+	mutex_unlock(&qce_iomap_mutex);
+	return pce_dev;
+err:
+	qce_disable_clk(pce_dev);
+
+err_enable_clk:
+	__qce_deinit_clk(pce_dev);
+
+err_mem:
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+			pce_dev->coh_vmem, pce_dev->coh_pmem);
+err_iobase:
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+err_pce_dev:
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(pce_dev);
+	return NULL;
+}
+EXPORT_SYMBOL(qce_open);
+
+/* Crypto engine close: release all resources acquired in qce_open(). */
+int qce_close(void *handle)
+{
+	struct qce_device *pce_dev = (struct qce_device *) handle;
+
+	if (handle == NULL)
+		return -ENODEV;
+
+	mutex_lock(&qce_iomap_mutex);
+	qce_enable_clk(pce_dev);
+	qce_sps_exit(pce_dev);
+
+	if (pce_dev->iobase)
+		iounmap(pce_dev->iobase);
+	if (pce_dev->coh_vmem)
+		dma_free_coherent(pce_dev->pdev, pce_dev->memsize,
+				pce_dev->coh_vmem, pce_dev->coh_pmem);
+	kfree(pce_dev->dummyreq_in_buf);
+	kfree(pce_dev->iovec_vmem);
+
+	qce_disable_clk(pce_dev);
+	__qce_deinit_clk(pce_dev);
+	mutex_unlock(&qce_iomap_mutex);
+	kfree(handle);
+
+	return 0;
+}
+EXPORT_SYMBOL(qce_close);
+
+#define OTA_SUPPORT_MASK (1 << CRYPTO_ENCR_SNOW3G_SEL |\
+				1 << CRYPTO_ENCR_KASUMI_SEL |\
+				1 << CRYPTO_AUTH_SNOW3G_SEL |\
+				1 << CRYPTO_AUTH_KASUMI_SEL)
+
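+/*
+ * Report the capabilities of this CE instance. OTA (F8/F9) support is
+ * advertised only when all four Kasumi/Snow3G engines are available,
+ * and multiple outstanding requests only when no_get_around is set.
+ */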
+int qce_hw_support(void *handle, struct ce_hw_support *ce_support)
+{
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	if (ce_support == NULL)
+		return -EINVAL;
+
+	ce_support->sha1_hmac_20 = false;
+	ce_support->sha1_hmac = false;
+	ce_support->sha256_hmac = false;
+	ce_support->sha_hmac = true;
+	ce_support->cmac  = true;
+	ce_support->aes_key_192 = false;
+	ce_support->aes_xts = true;
+	if ((pce_dev->engines_avail & OTA_SUPPORT_MASK) == OTA_SUPPORT_MASK)
+		ce_support->ota = true;
+	else
+		ce_support->ota = false;
+	ce_support->bam = true;
+	ce_support->is_shared = (pce_dev->is_shared == 1) ? true : false;
+	ce_support->hw_key = pce_dev->support_hw_key;
+	ce_support->aes_ccm = true;
+	ce_support->clk_mgmt_sus_res = pce_dev->support_clk_mgmt_sus_res;
+	if (pce_dev->ce_bam_info.minor_version)
+		ce_support->aligned_only = false;
+	else
+		ce_support->aligned_only = true;
+
+	ce_support->use_sw_aes_cbc_ecb_ctr_algo =
+				pce_dev->use_sw_aes_cbc_ecb_ctr_algo;
+	ce_support->use_sw_aead_algo =
+				pce_dev->use_sw_aead_algo;
+	ce_support->use_sw_aes_xts_algo =
+				pce_dev->use_sw_aes_xts_algo;
+	ce_support->use_sw_ahash_algo =
+				pce_dev->use_sw_ahash_algo;
+	ce_support->use_sw_hmac_algo =
+				pce_dev->use_sw_hmac_algo;
+	ce_support->use_sw_aes_ccm_algo =
+				pce_dev->use_sw_aes_ccm_algo;
+	ce_support->ce_device = pce_dev->ce_bam_info.ce_device;
+	ce_support->ce_hw_instance = pce_dev->ce_bam_info.ce_hw_instance;
+	if (pce_dev->no_get_around)
+		ce_support->max_request = MAX_QCE_BAM_REQ;
+	else
+		ce_support->max_request = 1;
+	return 0;
+}
+EXPORT_SYMBOL(qce_hw_support);
+
+void qce_dump_req(void *handle)
+{
+	int i;
+	struct qce_device *pce_dev = (struct qce_device *)handle;
+
+	for (i = 0; i < MAX_QCE_BAM_REQ; i++) {
+		pr_info("qce_dump_req %d %d\n", i,
+					pce_dev->ce_request_info[i].in_use);
+		if (pce_dev->ce_request_info[i].in_use)
+			_qce_dump_descr_fifos(pce_dev, i);
+	}
+}
+EXPORT_SYMBOL(qce_dump_req);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Crypto Engine driver");
diff --git a/drivers/crypto/msm/qce50.h b/drivers/crypto/msm/qce50.h
new file mode 100644
index 0000000..0e60bd2
--- /dev/null
+++ b/drivers/crypto/msm/qce50.h
@@ -0,0 +1,245 @@
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#ifndef _DRIVERS_CRYPTO_MSM_QCE50_H_
+#define _DRIVERS_CRYPTO_MSM_QCE50_H_
+
+#include <linux/msm-sps.h>
+
+/* MAX Data xfer block size between BAM and CE */
+#define MAX_CE_BAM_BURST_SIZE   0x40
+#define QCEBAM_BURST_SIZE	MAX_CE_BAM_BURST_SIZE
+
+#define GET_VIRT_ADDR(x)  \
+		((uintptr_t)pce_dev->coh_vmem +			\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x)  \
+		(phys_addr_t)(((uintptr_t)pce_dev->coh_pmem +	\
+		((uintptr_t)x - (uintptr_t)pce_dev->coh_vmem)))
+
+#define CRYPTO_REG_SIZE 4
+#define NUM_OF_CRYPTO_AUTH_IV_REG 16
+#define NUM_OF_CRYPTO_CNTR_IV_REG 4
+#define NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG 4
+#define CRYPTO_TOTAL_REGISTERS_DUMPED   26
+#define CRYPTO_RESULT_DUMP_SIZE   \
+	ALIGN((CRYPTO_TOTAL_REGISTERS_DUMPED * CRYPTO_REG_SIZE), \
+	QCEBAM_BURST_SIZE)
+
+/* QCE max number of descriptor in a descriptor list */
+#define QCE_MAX_NUM_DESC    128
+#define SPS_MAX_PKT_SIZE  (32 * 1024  - 64)
+
+/* default bam ipc log level */
+#define QCE_BAM_DEFAULT_IPC_LOGLVL 2
+
+/* State of consumer/producer Pipe */
+enum qce_pipe_st_enum {
+	QCE_PIPE_STATE_IDLE = 0,
+	QCE_PIPE_STATE_IN_PROG = 1,
+	QCE_PIPE_STATE_COMP = 2,
+	QCE_PIPE_STATE_LAST
+};
+
+enum qce_xfer_type_enum {
+	QCE_XFER_HASHING,
+	QCE_XFER_CIPHERING,
+	QCE_XFER_AEAD,
+	QCE_XFER_F8,
+	QCE_XFER_F9,
+	QCE_XFER_TYPE_LAST
+};
+
+struct qce_sps_ep_conn_data {
+	struct sps_pipe			*pipe;
+	struct sps_connect		connect;
+	struct sps_register_event	event;
+};
+
+/* CE result dump format */
+struct ce_result_dump_format {
+	uint32_t auth_iv[NUM_OF_CRYPTO_AUTH_IV_REG];
+	uint32_t auth_byte_count[NUM_OF_CRYPTO_AUTH_BYTE_COUNT_REG];
+	uint32_t encr_cntr_iv[NUM_OF_CRYPTO_CNTR_IV_REG];
+	uint32_t status;
+	uint32_t status2;
+};
+
+struct qce_cmdlist_info {
+	unsigned long cmdlist;
+	struct sps_command_element *crypto_cfg;
+	struct sps_command_element *encr_seg_cfg;
+	struct sps_command_element *encr_seg_size;
+	struct sps_command_element *encr_seg_start;
+	struct sps_command_element *encr_key;
+	struct sps_command_element *encr_xts_key;
+	struct sps_command_element *encr_cntr_iv;
+	struct sps_command_element *encr_ccm_cntr_iv;
+	struct sps_command_element *encr_mask;
+	struct sps_command_element *encr_xts_du_size;
+
+	struct sps_command_element *auth_seg_cfg;
+	struct sps_command_element *auth_seg_size;
+	struct sps_command_element *auth_seg_start;
+	struct sps_command_element *auth_key;
+	struct sps_command_element *auth_iv;
+	struct sps_command_element *auth_nonce_info;
+	struct sps_command_element *auth_bytecount;
+	struct sps_command_element *seg_size;
+	struct sps_command_element *go_proc;
+	ptrdiff_t size;
+};
+
+struct qce_cmdlistptr_ops {
+	struct qce_cmdlist_info cipher_aes_128_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_256_cbc_ctr;
+	struct qce_cmdlist_info cipher_aes_128_ecb;
+	struct qce_cmdlist_info cipher_aes_256_ecb;
+	struct qce_cmdlist_info cipher_aes_128_xts;
+	struct qce_cmdlist_info cipher_aes_256_xts;
+	struct qce_cmdlist_info cipher_des_cbc;
+	struct qce_cmdlist_info cipher_des_ecb;
+	struct qce_cmdlist_info cipher_3des_cbc;
+	struct qce_cmdlist_info cipher_3des_ecb;
+	struct qce_cmdlist_info auth_sha1;
+	struct qce_cmdlist_info auth_sha256;
+	struct qce_cmdlist_info auth_sha1_hmac;
+	struct qce_cmdlist_info auth_sha256_hmac;
+	struct qce_cmdlist_info auth_aes_128_cmac;
+	struct qce_cmdlist_info auth_aes_256_cmac;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha1_cbc_3des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_128;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_aes_256;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_des;
+	struct qce_cmdlist_info aead_hmac_sha256_cbc_3des;
+	struct qce_cmdlist_info aead_aes_128_ccm;
+	struct qce_cmdlist_info aead_aes_256_ccm;
+	struct qce_cmdlist_info cipher_null;
+	struct qce_cmdlist_info f8_kasumi;
+	struct qce_cmdlist_info f8_snow3g;
+	struct qce_cmdlist_info f9_kasumi;
+	struct qce_cmdlist_info f9_snow3g;
+	struct qce_cmdlist_info unlock_all_pipes;
+};
+
+struct qce_ce_cfg_reg_setting {
+	uint32_t crypto_cfg_be;
+	uint32_t crypto_cfg_le;
+
+	uint32_t encr_cfg_aes_cbc_128;
+	uint32_t encr_cfg_aes_cbc_256;
+
+	uint32_t encr_cfg_aes_ecb_128;
+	uint32_t encr_cfg_aes_ecb_256;
+
+	uint32_t encr_cfg_aes_xts_128;
+	uint32_t encr_cfg_aes_xts_256;
+
+	uint32_t encr_cfg_aes_ctr_128;
+	uint32_t encr_cfg_aes_ctr_256;
+
+	uint32_t encr_cfg_aes_ccm_128;
+	uint32_t encr_cfg_aes_ccm_256;
+
+	uint32_t encr_cfg_des_cbc;
+	uint32_t encr_cfg_des_ecb;
+
+	uint32_t encr_cfg_3des_cbc;
+	uint32_t encr_cfg_3des_ecb;
+	uint32_t encr_cfg_kasumi;
+	uint32_t encr_cfg_snow3g;
+
+	uint32_t auth_cfg_cmac_128;
+	uint32_t auth_cfg_cmac_256;
+
+	uint32_t auth_cfg_sha1;
+	uint32_t auth_cfg_sha256;
+
+	uint32_t auth_cfg_hmac_sha1;
+	uint32_t auth_cfg_hmac_sha256;
+
+	uint32_t auth_cfg_aes_ccm_128;
+	uint32_t auth_cfg_aes_ccm_256;
+	uint32_t auth_cfg_aead_sha1_hmac;
+	uint32_t auth_cfg_aead_sha256_hmac;
+	uint32_t auth_cfg_kasumi;
+	uint32_t auth_cfg_snow3g;
+};
+
+struct ce_bam_info {
+	uint32_t			bam_irq;
+	uint32_t			bam_mem;
+	void __iomem			*bam_iobase;
+	uint32_t			ce_device;
+	uint32_t			ce_hw_instance;
+	uint32_t			bam_ee;
+	unsigned int			pipe_pair_index;
+	unsigned int			src_pipe_index;
+	unsigned int			dest_pipe_index;
+	unsigned long			bam_handle;
+	int				ce_burst_size;
+	uint32_t			minor_version;
+	struct qce_sps_ep_conn_data	producer;
+	struct qce_sps_ep_conn_data	consumer;
+};
+
+/* SPS data structure with buffers, command lists & command pointer lists */
+struct ce_sps_data {
+	enum qce_pipe_st_enum producer_state;	/* Producer pipe state */
+	int consumer_status;		/* consumer pipe status */
+	int producer_status;		/* producer pipe status */
+	struct sps_transfer in_transfer;
+	struct sps_transfer out_transfer;
+	struct qce_cmdlistptr_ops cmdlistptr;
+	uint32_t result_dump; /* result dump virtual address */
+	uint32_t result_dump_null;
+	uint32_t result_dump_phy; /* result dump physical address (32 bits) */
+	uint32_t result_dump_null_phy;
+
+	uint32_t ignore_buffer; /* ignore buffer virtual address */
+	struct ce_result_dump_format *result; /* pointer to result dump */
+	struct ce_result_dump_format *result_null;
+};
+
+struct ce_request_info {
+	bool in_use;
+	bool in_prog;
+	enum qce_xfer_type_enum	xfer_type;
+	struct ce_sps_data ce_sps;
+	qce_comp_func_ptr_t qce_cb;	/* qce callback function pointer */
+	void *user;
+	void *areq;
+	int assoc_nents;
+	struct scatterlist *asg;        /* Formatted associated data sg  */
+	int src_nents;
+	int dst_nents;
+	dma_addr_t phy_iv_in;
+	unsigned char dec_iv[16];
+	int dir;
+	enum qce_cipher_mode_enum mode;
+	dma_addr_t phy_ota_src;
+	dma_addr_t phy_ota_dst;
+	unsigned int ota_size;
+	unsigned int req_len;
+};
+
+struct qce_driver_stats {
+	int no_of_timeouts;
+	int no_of_dummy_reqs;
+	int current_mode;
+	int outstanding_reqs;
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCE50_H_ */
diff --git a/drivers/crypto/msm/qce_ota.h b/drivers/crypto/msm/qce_ota.h
new file mode 100644
index 0000000..2f985fa
--- /dev/null
+++ b/drivers/crypto/msm/qce_ota.h
@@ -0,0 +1,30 @@
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+/* QTI Crypto Engine driver OTA API */
+
+#ifndef __CRYPTO_MSM_QCE_OTA_H
+#define __CRYPTO_MSM_QCE_OTA_H
+
+#include <linux/platform_device.h>
+#include <linux/qcota.h>
+
+int qce_f8_req(void *handle, struct qce_f8_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+int qce_f9_req(void *handle, struct qce_f9_req *req,
+		void *cookie, qce_comp_func_ptr_t qce_cb);
+
+#endif /* __CRYPTO_MSM_QCE_OTA_H */
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
new file mode 100644
index 0000000..0860e59
--- /dev/null
+++ b/drivers/crypto/msm/qcedev.c
@@ -0,0 +1,2054 @@
+/*
+ * QTI CE device driver.
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+#include <linux/mman.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/dmapool.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/miscdevice.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/qcedev.h>
+
+#include <crypto/hash.h>
+#include "qcedevi.h"
+#include "qce.h"
+
+#include <linux/compat.h>
+#include "compat_qcedev.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+/* standard initialization vector for SHA-1, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha1_uint8[] = {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+static DEFINE_MUTEX(send_cmd_lock);
+static DEFINE_MUTEX(qcedev_sent_bw_req);
+
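+/*
+ * Reference-counted bandwidth voting: the first high-bandwidth request
+ * enables the CE clocks and votes for high bus bandwidth, and the last
+ * release reverses both, so clock and bus votes stay balanced across
+ * open file handles.
+ */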
+static void qcedev_ce_high_bw_req(struct qcedev_control *podev,
+							bool high_bw_req)
+{
+	int ret = 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (high_bw_req) {
+		if (podev->high_bw_req_count == 0) {
+			ret = qce_enable_clk(podev->qce);
+			if (ret) {
+				pr_err("%s Unable to enable clk\n", __func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+			ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 1);
+			if (ret) {
+				pr_err("%s Unable to set to high bandwidth\n",
+							__func__);
+				ret = qce_disable_clk(podev->qce);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+		}
+		podev->high_bw_req_count++;
+	} else {
+		if (podev->high_bw_req_count == 1) {
+			ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 0);
+			if (ret) {
+				pr_err("%s Unable to set to low bandwidth\n",
+							__func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+			ret = qce_disable_clk(podev->qce);
+			if (ret) {
+				pr_err("%s Unable to disable clk\n", __func__);
+				ret = msm_bus_scale_client_update_request(
+					podev->bus_scale_handle, 1);
+				if (ret)
+					pr_err("%s Unable to set to high bandwidth\n",
+							__func__);
+				mutex_unlock(&qcedev_sent_bw_req);
+				return;
+			}
+		}
+		podev->high_bw_req_count--;
+	}
+	mutex_unlock(&qcedev_sent_bw_req);
+}
+
+#define QCEDEV_MAGIC 0x56434544 /* "qced" */
+
+static int qcedev_open(struct inode *inode, struct file *file);
+static int qcedev_release(struct inode *inode, struct file *file);
+static int start_cipher_req(struct qcedev_control *podev);
+static int start_sha_req(struct qcedev_control *podev);
+static inline long qcedev_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg);
+
+#ifdef CONFIG_COMPAT
+#include "compat_qcedev.c"
+#else
+#define compat_qcedev_ioctl	NULL
+#endif
+
+static const struct file_operations qcedev_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = qcedev_ioctl,
+	.compat_ioctl = compat_qcedev_ioctl,
+	.open = qcedev_open,
+	.release = qcedev_release,
+};
+
+static struct qcedev_control qce_dev[] = {
+	{
+		.miscdevice = {
+			.minor = MISC_DYNAMIC_MINOR,
+			.name = "qce",
+			.fops = &qcedev_fops,
+		},
+		.magic = QCEDEV_MAGIC,
+	},
+};
+
+#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev)
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 1024
+
+struct qcedev_stat {
+	u32 qcedev_dec_success;
+	u32 qcedev_dec_fail;
+	u32 qcedev_enc_success;
+	u32 qcedev_enc_fail;
+	u32 qcedev_sha_success;
+	u32 qcedev_sha_fail;
+};
+
+static struct qcedev_stat _qcedev_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static int _debug_qcedev;
+
+static struct qcedev_control *qcedev_minor_to_control(unsigned int n)
+{
+	int i;
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		if (qce_dev[i].miscdevice.minor == n)
+			return &qce_dev[i];
+	}
+	return NULL;
+}
+
+static int qcedev_open(struct inode *inode, struct file *file)
+{
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+
+	podev = qcedev_minor_to_control(MINOR(inode->i_rdev));
+	if (podev == NULL) {
+		pr_err("%s: no such device %d\n", __func__,
+					MINOR(inode->i_rdev));
+		return -ENOENT;
+	}
+
+	handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL);
+	if (handle == NULL)
+		return -ENOMEM;
+
+	handle->cntl = podev;
+	file->private_data = handle;
+	if (podev->platform_support.bus_scale_table != NULL)
+		qcedev_ce_high_bw_req(podev, true);
+	return 0;
+}
+
+static int qcedev_release(struct inode *inode, struct file *file)
+{
+	struct qcedev_control *podev;
+	struct qcedev_handle *handle;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	if (podev != NULL && podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+					__func__, podev);
+	}
+	kzfree(handle);
+	file->private_data = NULL;
+	if (podev != NULL && podev->platform_support.bus_scale_table != NULL)
+		qcedev_ce_high_bw_req(podev, false);
+	return 0;
+}
+
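+/*
+ * Tasklet handler run when a request completes: the finished request
+ * is completed, then the next command is pulled off ready_commands and
+ * started. If starting it fails, that request is completed immediately
+ * with an error and the loop moves on to the next one.
+ */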
+static void req_done(unsigned long data)
+{
+	struct qcedev_control *podev = (struct qcedev_control *)data;
+	struct qcedev_async_req *areq;
+	unsigned long flags = 0;
+	struct qcedev_async_req *new_req = NULL;
+	int ret = 0;
+
+	spin_lock_irqsave(&podev->lock, flags);
+	areq = podev->active_command;
+	podev->active_command = NULL;
+
+again:
+	if (!list_empty(&podev->ready_commands)) {
+		new_req = container_of(podev->ready_commands.next,
+						struct qcedev_async_req, list);
+		list_del(&new_req->list);
+		podev->active_command = new_req;
+		new_req->err = 0;
+		if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev);
+	}
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (areq)
+		complete(&areq->complete);
+
+	if (new_req && ret) {
+		complete(&new_req->complete);
+		spin_lock_irqsave(&podev->lock, flags);
+		podev->active_command = NULL;
+		areq = NULL;
+		ret = 0;
+		new_req = NULL;
+		goto again;
+	}
+}
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret)
+{
+	struct qcedev_sha_req *areq;
+	struct qcedev_control *pdev;
+	struct qcedev_handle *handle;
+
+	uint32_t *auth32 = (uint32_t *)authdata;
+
+	areq = (struct qcedev_sha_req *) cookie;
+	handle = (struct qcedev_handle *) areq->cookie;
+	pdev = handle->cntl;
+
+	if (digest)
+		memcpy(&handle->sha_ctxt.digest[0], digest, 32);
+
+	if (authdata) {
+		handle->sha_ctxt.auth_data[0] = auth32[0];
+		handle->sha_ctxt.auth_data[1] = auth32[1];
+		handle->sha_ctxt.auth_data[2] = auth32[2];
+		handle->sha_ctxt.auth_data[3] = auth32[3];
+	}
+
+	tasklet_schedule(&pdev->done_tasklet);
+}
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret)
+{
+	struct qcedev_cipher_req *areq;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req *qcedev_areq;
+
+	areq = (struct qcedev_cipher_req *) cookie;
+	handle = (struct qcedev_handle *) areq->cookie;
+	podev = handle->cntl;
+	qcedev_areq = podev->active_command;
+
+	if (iv)
+		memcpy(&qcedev_areq->cipher_op_req.iv[0], iv,
+					qcedev_areq->cipher_op_req.ivlen);
+	tasklet_schedule(&podev->done_tasklet);
+}
+
+static int start_cipher_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_req creq;
+	int ret = 0;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	qcedev_areq->cipher_req.cookie = qcedev_areq->handle;
+	if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto unsupported;
+	}
+	creq.pmem = NULL;
+	switch (qcedev_areq->cipher_op_req.alg) {
+	case QCEDEV_ALG_DES:
+		creq.alg = CIPHER_ALG_DES;
+		break;
+	case QCEDEV_ALG_3DES:
+		creq.alg = CIPHER_ALG_3DES;
+		break;
+	case QCEDEV_ALG_AES:
+		creq.alg = CIPHER_ALG_AES;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	switch (qcedev_areq->cipher_op_req.mode) {
+	case QCEDEV_AES_MODE_CBC:
+	case QCEDEV_DES_MODE_CBC:
+		creq.mode = QCE_MODE_CBC;
+		break;
+	case QCEDEV_AES_MODE_ECB:
+	case QCEDEV_DES_MODE_ECB:
+		creq.mode = QCE_MODE_ECB;
+		break;
+	case QCEDEV_AES_MODE_CTR:
+		creq.mode = QCE_MODE_CTR;
+		break;
+	case QCEDEV_AES_MODE_XTS:
+		creq.mode = QCE_MODE_XTS;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if ((creq.alg == CIPHER_ALG_AES) &&
+		(creq.mode == QCE_MODE_CTR)) {
+		creq.dir = QCE_ENCRYPT;
+	} else {
+		if (qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC)
+			creq.dir = QCE_ENCRYPT;
+		else
+			creq.dir = QCE_DECRYPT;
+	}
+
+	creq.iv = &qcedev_areq->cipher_op_req.iv[0];
+	creq.ivsize = qcedev_areq->cipher_op_req.ivlen;
+
+	creq.enckey =  &qcedev_areq->cipher_op_req.enckey[0];
+	creq.encklen = qcedev_areq->cipher_op_req.encklen;
+
+	creq.cryptlen = qcedev_areq->cipher_op_req.data_len;
+
+	if (qcedev_areq->cipher_op_req.encklen == 0) {
+		if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY)
+			|| (qcedev_areq->cipher_op_req.op ==
+				QCEDEV_OPER_DEC_NO_KEY))
+			creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY;
+		else {
+			int i;
+
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+				if (qcedev_areq->cipher_op_req.enckey[i] != 0)
+					break;
+			}
+
+			if ((podev->platform_support.hw_key_support == 1) &&
+						(i == QCEDEV_MAX_KEY_SIZE))
+				creq.op = QCE_REQ_ABLK_CIPHER;
+			else {
+				ret = -EINVAL;
+				goto unsupported;
+			}
+		}
+	} else {
+		creq.op = QCE_REQ_ABLK_CIPHER;
+	}
+
+	creq.qce_cb = qcedev_cipher_req_cb;
+	creq.areq = (void *)&qcedev_areq->cipher_req;
+	creq.flags = 0;
+	ret = qce_ablk_cipher_req(podev->qce, &creq);
+unsupported:
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
+static int start_sha_req(struct qcedev_control *podev)
+{
+	struct qcedev_async_req *qcedev_areq;
+	struct qce_sha_req sreq;
+	int ret = 0;
+	struct qcedev_handle *handle;
+
+	/* start the command on the podev->active_command */
+	qcedev_areq = podev->active_command;
+	handle = qcedev_areq->handle;
+
+	switch (qcedev_areq->sha_op_req.alg) {
+	case QCEDEV_ALG_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		break;
+	case QCEDEV_ALG_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		break;
+	case QCEDEV_ALG_SHA1_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA1_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+
+		} else {
+			sreq.alg = QCE_HASH_SHA1;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_SHA256_HMAC:
+		if (podev->ce_support.sha_hmac) {
+			sreq.alg = QCE_HASH_SHA256_HMAC;
+			sreq.authkey = &handle->sha_ctxt.authkey[0];
+			sreq.authklen = QCEDEV_MAX_SHA_BLOCK_SIZE;
+		} else {
+			sreq.alg = QCE_HASH_SHA256;
+			sreq.authkey = NULL;
+		}
+		break;
+	case QCEDEV_ALG_AES_CMAC:
+		sreq.alg = QCE_HASH_AES_CMAC;
+		sreq.authkey = &handle->sha_ctxt.authkey[0];
+		sreq.authklen = qcedev_areq->sha_op_req.authklen;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n",
+			qcedev_areq->sha_op_req.alg);
+		return -EINVAL;
+	}
+
+	qcedev_areq->sha_req.cookie = handle;
+
+	sreq.qce_cb = qcedev_sha_req_cb;
+	if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) {
+		sreq.auth_data[0] = handle->sha_ctxt.auth_data[0];
+		sreq.auth_data[1] = handle->sha_ctxt.auth_data[1];
+		sreq.auth_data[2] = handle->sha_ctxt.auth_data[2];
+		sreq.auth_data[3] = handle->sha_ctxt.auth_data[3];
+		sreq.digest = &handle->sha_ctxt.digest[0];
+		sreq.first_blk = handle->sha_ctxt.first_blk;
+		sreq.last_blk = handle->sha_ctxt.last_blk;
+	}
+	sreq.size = qcedev_areq->sha_req.sreq.nbytes;
+	sreq.src = qcedev_areq->sha_req.sreq.src;
+	sreq.areq = (void *)&qcedev_areq->sha_req;
+	sreq.flags = 0;
+
+	ret = qce_process_sha_req(podev->qce, &sreq);
+
+	if (ret)
+		qcedev_areq->err = -ENXIO;
+	else
+		qcedev_areq->err = 0;
+	return ret;
+}
+
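+/*
+ * Hand a request to the engine: start it immediately if no command is
+ * active, otherwise queue it on ready_commands; then block until the
+ * completion callback fires and account the result in the driver
+ * statistics.
+ */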
+static int submit_req(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle)
+{
+	struct qcedev_control *podev;
+	unsigned long flags = 0;
+	int ret = 0;
+	struct qcedev_stat *pstat;
+
+	qcedev_areq->err = 0;
+	podev = handle->cntl;
+
+	spin_lock_irqsave(&podev->lock, flags);
+
+	if (podev->active_command == NULL) {
+		podev->active_command = qcedev_areq;
+		if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER)
+			ret = start_cipher_req(podev);
+		else
+			ret = start_sha_req(podev);
+	} else {
+		list_add_tail(&qcedev_areq->list, &podev->ready_commands);
+	}
+
+	if (ret != 0)
+		podev->active_command = NULL;
+
+	spin_unlock_irqrestore(&podev->lock, flags);
+
+	if (ret == 0)
+		wait_for_completion(&qcedev_areq->complete);
+
+	if (ret)
+		qcedev_areq->err = -EIO;
+
+	pstat = &_qcedev_stat;
+	if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) {
+		switch (qcedev_areq->cipher_op_req.op) {
+		case QCEDEV_OPER_DEC:
+			if (qcedev_areq->err)
+				pstat->qcedev_dec_fail++;
+			else
+				pstat->qcedev_dec_success++;
+			break;
+		case QCEDEV_OPER_ENC:
+			if (qcedev_areq->err)
+				pstat->qcedev_enc_fail++;
+			else
+				pstat->qcedev_enc_success++;
+			break;
+		default:
+			break;
+		}
+	} else {
+		if (qcedev_areq->err)
+			pstat->qcedev_sha_fail++;
+		else
+			pstat->qcedev_sha_success++;
+	}
+
+	return qcedev_areq->err;
+}
+
+static int qcedev_sha_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt;
+
+	memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt));
+	sha_ctxt->first_blk = 1;
+
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)) {
+		memcpy(&sha_ctxt->digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		sha_ctxt->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) {
+			memcpy(&sha_ctxt->digest[0],
+					&_std_init_vector_sha256_uint8[0],
+					SHA256_DIGEST_SIZE);
+			sha_ctxt->diglen = SHA256_DIGEST_SIZE;
+		}
+	}
+	sha_ctxt->init_done = true;
+	return 0;
+}
+
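+/*
+ * Hash one chunk of at most QCE_MAX_OPER_DATA bytes. User buffers are
+ * copied into a cache-line aligned kernel buffer behind any bytes left
+ * over from the previous update; data beyond the last full SHA block
+ * is saved back as the new trailing buffer for the next call.
+ */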
+static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t t_buf = handle->sha_ctxt.trailing_buf_len;
+	uint32_t sha_block_size;
+
+	total = qcedev_areq->sha_op_req.data_len + t_buf;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	if (total <= sha_block_size) {
+		uint32_t len =  qcedev_areq->sha_op_req.data_len;
+
+		i = 0;
+
+		k_src = &handle->sha_ctxt.trailing_buf[t_buf];
+
+		/* Copy data from user src(s) */
+		while (len > 0) {
+			user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+			if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len))
+				return -EFAULT;
+
+			len -= qcedev_areq->sha_op_req.data[i].len;
+			k_src += qcedev_areq->sha_op_req.data[i].len;
+			i++;
+		}
+		handle->sha_ctxt.trailing_buf_len = total;
+
+		return 0;
+	}
+
+	k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	k_src = k_align_src;
+
+	/* check for trailing buffer from previous updates and append it */
+	if (t_buf > 0) {
+		memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+								t_buf);
+		k_src += t_buf;
+	}
+
+	/* Copy data from user src(s) */
+	user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr;
+	if (user_src && copy_from_user(k_src,
+				(void __user *)user_src,
+				qcedev_areq->sha_op_req.data[0].len)) {
+		kzfree(k_buf_src);
+		return -EFAULT;
+	}
+	k_src += qcedev_areq->sha_op_req.data[0].len;
+	for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src,
+					(void __user *)user_src,
+					qcedev_areq->sha_op_req.data[i].len)) {
+			kzfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total;
+	trailing_buf_len =  CE_SHA_BLOCK_SIZE - sha_pad_len;
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src,
+						total-trailing_buf_len);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len;
+
+	/*  update sha_ctxt trailing buf content to new trailing buf */
+	if (trailing_buf_len > 0) {
+		memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+		memcpy(&handle->sha_ctxt.trailing_buf[0],
+			(k_src - trailing_buf_len),
+			trailing_buf_len);
+	}
+	handle->sha_ctxt.trailing_buf_len = trailing_buf_len;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+
+	kzfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+
+	if (handle->sha_ctxt.init_done == false) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct	qcedev_sha_op_req *saved_req;
+		struct	qcedev_sha_op_req req;
+		struct	qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req;
+
+		/* save the original req structure */
+		saved_req =
+			kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL);
+		if (saved_req == NULL) {
+			pr_err("%s: Can't allocate memory for saved_req\n",
+						__func__);
+			return -ENOMEM;
+		}
+		memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req));
+		memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req));
+
+		i = 0;
+		/* Address 32 KB at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (sreq->data[i].len > QCE_MAX_OPER_DATA) {
+				sreq->data[0].len = QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					sreq->data[0].vaddr =
+							sreq->data[i].vaddr;
+				}
+
+				sreq->data_len = QCE_MAX_OPER_DATA;
+				sreq->entries = 1;
+
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+
+				sreq->data[i].len = req.data[i].len -
+							QCE_MAX_OPER_DATA;
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							QCE_MAX_OPER_DATA;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + sreq->data[j].len) >=
+							QCE_MAX_OPER_DATA) {
+						sreq->data[j].len =
+						(QCE_MAX_OPER_DATA - total);
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += sreq->data[j].len;
+				}
+
+				sreq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						sreq->data[k].len =
+							sreq->data[i+k].len;
+						sreq->data[k].vaddr =
+							sreq->data[i+k].vaddr;
+					}
+				sreq->entries = num_entries;
+
+				i = j;
+				err = qcedev_sha_update_max_xfer(qcedev_areq,
+								handle, sg_src);
+				num_entries = 0;
+
+				sreq->data[i].vaddr = req.data[i].vaddr +
+							sreq->data[i].len;
+				sreq->data[i].len = req.data[i].len -
+							sreq->data[i].len;
+				req.data[i].vaddr = sreq->data[i].vaddr;
+				req.data[i].len = sreq->data[i].len;
+
+				if (sreq->data[i].len == 0)
+					i++;
+			}
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+		/* Restore the original req structure */
+		for (i = 0; i < saved_req->entries; i++) {
+			sreq->data[i].len = saved_req->data[i].len;
+			sreq->data[i].vaddr = saved_req->data[i].vaddr;
+		}
+		sreq->entries = saved_req->entries;
+		sreq->data_len = saved_req->data_len;
+		kzfree(saved_req);
+	} else
+		err = qcedev_sha_update_max_xfer(qcedev_areq, handle, sg_src);
+
+	return err;
+}
+
+static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint32_t total;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+
+	if (handle->sha_ctxt.init_done == false) {
+		pr_err("%s Init was not called\n", __func__);
+		return -EINVAL;
+	}
+
+	handle->sha_ctxt.last_blk = 1;
+
+	total = handle->sha_ctxt.trailing_buf_len;
+
+	if (total) {
+		k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2,
+					GFP_KERNEL);
+		if (k_buf_src == NULL)
+			return -ENOMEM;
+
+		k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+		memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total);
+	}
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.first_blk = 0;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+	handle->sha_ctxt.trailing_buf_len = 0;
+	handle->sha_ctxt.init_done = false;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, 64);
+
+	kzfree(k_buf_src);
+	return err;
+}
+
+static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+	int i = 0;
+	uint32_t total;
+
+	uint8_t *user_src = NULL;
+	uint8_t *k_src = NULL;
+	uint8_t *k_buf_src = NULL;
+
+	total = qcedev_areq->sha_op_req.data_len;
+
+	if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)qcedev_areq->sha_op_req.authkey,
+				qcedev_areq->sha_op_req.authklen))
+		return -EFAULT;
+
+	k_buf_src = kmalloc(total, GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+
+	k_src = k_buf_src;
+
+	/* Copy data from user src(s) */
+	for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) {
+		user_src =
+			(void __user *)qcedev_areq->sha_op_req.data[i].vaddr;
+		if (user_src && copy_from_user(k_src, (void __user *)user_src,
+				qcedev_areq->sha_op_req.data[i].len)) {
+			kzfree(k_buf_src);
+			return -EFAULT;
+		}
+		k_src += qcedev_areq->sha_op_req.data[i].len;
+	}
+
+	qcedev_areq->sha_req.sreq.src = sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = total;
+	handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen;
+	err = submit_req(qcedev_areq, handle);
+
+	kzfree(k_buf_src);
+	return err;
+}
+
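+/*
+ * Install the HMAC authentication key: keys up to QCEDEV_MAX_KEY_SIZE
+ * are copied in directly, while longer keys are first reduced to their
+ * digest with a full hash pass, as the HMAC construction requires.
+ */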
+static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq,
+					struct qcedev_handle *handle,
+					struct scatterlist *sg_src)
+{
+	int err = 0;
+
+	if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) {
+		qcedev_sha_init(areq, handle);
+		if (copy_from_user(&handle->sha_ctxt.authkey[0],
+				(void __user *)areq->sha_op_req.authkey,
+				areq->sha_op_req.authklen))
+			return -EFAULT;
+	} else {
+		struct qcedev_async_req authkey_areq;
+		uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+
+		init_completion(&authkey_areq.complete);
+
+		authkey_areq.sha_op_req.entries = 1;
+		authkey_areq.sha_op_req.data[0].vaddr =
+						areq->sha_op_req.authkey;
+		authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen;
+		authkey_areq.sha_op_req.diglen = 0;
+		authkey_areq.handle = handle;
+
+		memset(&authkey_areq.sha_op_req.digest[0], 0,
+						QCEDEV_MAX_SHA_DIGEST);
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1;
+		if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)
+			authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256;
+
+		authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		qcedev_sha_init(&authkey_areq, handle);
+		err = qcedev_sha_update(&authkey_areq, handle, sg_src);
+		if (!err)
+			err = qcedev_sha_final(&authkey_areq, handle);
+		else
+			return err;
+		memcpy(&authkey[0], &handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		qcedev_sha_init(areq, handle);
+
+		memcpy(&handle->sha_ctxt.authkey[0], &authkey[0],
+				handle->sha_ctxt.diglen);
+	}
+	return err;
+}
+
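+/*
+ * Software HMAC fallback, outer hash: hash the opad-XORed key block and
+ * stage the inner digest as the new trailing buffer, after re-seeding
+ * the digest with the standard initial vector.
+ */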
+static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle)
+{
+	int err = 0;
+	struct scatterlist sg_src;
+	uint8_t *k_src = NULL;
+	uint32_t sha_block_size = 0;
+	uint32_t sha_digest_size = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		sha_digest_size = SHA1_DIGEST_SIZE;
+		sha_block_size = SHA1_BLOCK_SIZE;
+	} else {
+		if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+			sha_digest_size = SHA256_DIGEST_SIZE;
+			sha_block_size = SHA256_BLOCK_SIZE;
+		}
+	}
+	k_src = kmalloc(sha_block_size, GFP_KERNEL);
+	if (k_src == NULL)
+		return -ENOMEM;
+
+	/* check for trailing buffer from previous updates and append it */
+	memcpy(k_src, &handle->sha_ctxt.trailing_buf[0],
+			handle->sha_ctxt.trailing_buf_len);
+
+	qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src;
+	sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size);
+	sg_mark_end(qcedev_areq->sha_req.sreq.src);
+
+	qcedev_areq->sha_req.sreq.nbytes = sha_block_size;
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0],
+					sha_digest_size);
+	handle->sha_ctxt.trailing_buf_len = sha_digest_size;
+
+	handle->sha_ctxt.first_blk = 1;
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.auth_data[0] = 0;
+	handle->sha_ctxt.auth_data[1] = 0;
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE;
+	}
+
+	if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) {
+		memcpy(&handle->sha_ctxt.digest[0],
+			&_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE);
+		handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE;
+	}
+	err = submit_req(qcedev_areq, handle);
+
+	handle->sha_ctxt.last_blk = 0;
+	handle->sha_ctxt.first_blk = 0;
+
+	kzfree(k_src);
+	return err;
+}
+
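+/*
+ * Software HMAC fallback: XOR the stored authentication key with the
+ * ipad (0x36) or opad (0x5c) constant and stage the result in the
+ * trailing buffer as the first block to be hashed.
+ */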
+static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle, bool ikey)
+{
+	int i;
+	uint32_t constant;
+	uint32_t sha_block_size;
+
+	if (ikey)
+		constant = 0x36;
+	else
+		constant = 0x5c;
+
+	if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC)
+		sha_block_size = SHA1_BLOCK_SIZE;
+	else
+		sha_block_size = SHA256_BLOCK_SIZE;
+
+	memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size);
+	for (i = 0; i < sha_block_size; i++)
+		handle->sha_ctxt.trailing_buf[i] =
+				(handle->sha_ctxt.authkey[i] ^ constant);
+
+	handle->sha_ctxt.trailing_buf_len = sha_block_size;
+	return 0;
+}
+
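+/*
+ * The 0x36/0x5c constants used by qcedev_hmac_update_iokey() are the
+ * RFC 2104 HMAC inner/outer pad bytes. When the CE core has no native
+ * HMAC support, the driver composes
+ *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
+ * in software: qcedev_hmac_init() below seeds the hash with K ^ ipad,
+ * and qcedev_hmac_final() finishes with the K ^ opad outer pass.
+ */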
+static int qcedev_hmac_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_set_hmac_auth_key(areq, handle, sg_src);
+	if (err)
+		return err;
+	if (!podev->ce_support.sha_hmac)
+		qcedev_hmac_update_iokey(areq, handle, true);
+	return 0;
+}
+
+static int qcedev_hmac_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	int err;
+	struct qcedev_control *podev = handle->cntl;
+
+	err = qcedev_sha_final(areq, handle);
+	if (podev->ce_support.sha_hmac)
+		return err;
+
+	qcedev_hmac_update_iokey(areq, handle, false);
+	err = qcedev_hmac_get_ohash(areq, handle);
+	if (err)
+		return err;
+	err = qcedev_sha_final(areq, handle);
+
+	return err;
+}
+
+static int qcedev_hash_init(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_init(areq, handle);
+	else
+		return qcedev_hmac_init(areq, handle, sg_src);
+}
+
+static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq,
+				struct qcedev_handle *handle,
+				struct scatterlist *sg_src)
+{
+	return qcedev_sha_update(qcedev_areq, handle, sg_src);
+}
+
+static int qcedev_hash_final(struct qcedev_async_req *areq,
+				struct qcedev_handle *handle)
+{
+	if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) ||
+			(areq->sha_op_req.alg == QCEDEV_ALG_SHA256))
+		return qcedev_sha_final(areq, handle);
+	else
+		return qcedev_hmac_final(areq, handle);
+}
+
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+				int *di, struct qcedev_handle *handle,
+				uint8_t *k_align_src)
+{
+	int err = 0;
+	int i = 0;
+	int dst_i = *di;
+	struct scatterlist sg_src;
+	uint32_t byteoffset = 0;
+	uint8_t *user_src = NULL;
+	uint8_t *k_align_dst = k_align_src;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+
+	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+	if (user_src && copy_from_user((k_align_src + byteoffset),
+				(void __user *)user_src,
+				areq->cipher_op_req.vbuf.src[0].len))
+		return -EFAULT;
+
+	k_align_src += byteoffset + areq->cipher_op_req.vbuf.src[0].len;
+
+	for (i = 1; i < areq->cipher_op_req.entries; i++) {
+		user_src =
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+		if (user_src && copy_from_user(k_align_src,
+					(void __user *)user_src,
+					areq->cipher_op_req.vbuf.src[i].len)) {
+			return -EFAULT;
+		}
+		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+	}
+
+	/* restore src beginning */
+	k_align_src = k_align_dst;
+	areq->cipher_op_req.data_len += byteoffset;
+
+	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+	/* In place encryption/decryption */
+	sg_set_buf(areq->cipher_req.creq.src,
+					k_align_dst,
+					areq->cipher_op_req.data_len);
+	sg_mark_end(areq->cipher_req.creq.src);
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+	areq->cipher_op_req.entries = 1;
+
+	err = submit_req(areq, handle);
+
+	/* copy data to destination buffer*/
+	creq->data_len -= byteoffset;
+
+	while (creq->data_len > 0) {
+		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+			if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->vbuf.dst[dst_i].len))
+				return -EFAULT;
+
+			k_align_dst += creq->vbuf.dst[dst_i].len +
+						byteoffset;
+			creq->data_len -= creq->vbuf.dst[dst_i].len;
+			dst_i++;
+		} else {
+			if (err == 0 && copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->data_len))
+				return -EFAULT;
+
+			k_align_dst += creq->data_len;
+			creq->vbuf.dst[dst_i].len -= creq->data_len;
+			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+			creq->data_len = 0;
+		}
+	}
+	*di = dst_i;
+
+	return err;
+}
+
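+/*
+ * qcedev_vbuf_ablk_cipher() splits a request larger than
+ * QCE_MAX_OPER_DATA into chunks, bouncing each chunk through the
+ * cache-line-aligned kernel buffer that the helper above ciphers in
+ * place before scattering the result back to the user dst vectors.
+ */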
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+						struct qcedev_handle *handle)
+{
+	int err = 0;
+	int di = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	uint32_t byteoffset = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	uint32_t len;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+	uint32_t max_data_xfer;
+	struct qcedev_cipher_op_req *saved_req;
+	struct	qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	total = 0;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL)
+		return -ENOMEM;
+	k_align_src = (uint8_t *)ALIGN(((uintptr_t)k_buf_src),
+							CACHE_LINE_SIZE);
+	max_data_xfer = QCE_MAX_OPER_DATA - byteoffset;
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		kzfree(k_buf_src);
+		return -ENOMEM;
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (areq->cipher_op_req.data_len > max_data_xfer) {
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address 32 KB  at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->vbuf.src[i].len > max_data_xfer) {
+				creq->vbuf.src[0].len =	max_data_xfer;
+				if (i > 0) {
+					creq->vbuf.src[0].vaddr =
+						creq->vbuf.src[i].vaddr;
+				}
+
+				creq->data_len = max_data_xfer;
+				creq->entries = 1;
+
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					kzfree(k_buf_src);
+					kzfree(saved_req);
+					return err;
+				}
+
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							max_data_xfer;
+				creq->vbuf.src[i].vaddr =
+						req.vbuf.src[i].vaddr +
+						max_data_xfer;
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+			} else {
+				total = areq->cipher_op_req.byteoffset;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->vbuf.src[j].len)
+							>= max_data_xfer) {
+						creq->vbuf.src[j].len =
+						max_data_xfer - total;
+						total = max_data_xfer;
+						break;
+					}
+					total += creq->vbuf.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->vbuf.src[k].len =
+						creq->vbuf.src[i+k].len;
+						creq->vbuf.src[k].vaddr =
+						creq->vbuf.src[i+k].vaddr;
+					}
+				creq->entries =  num_entries;
+
+				i = j;
+				err = qcedev_vbuf_ablk_cipher_max_xfer(areq,
+						&di, handle, k_align_src);
+				if (err < 0) {
+					kzfree(k_buf_src);
+					kzfree(saved_req);
+					return err;
+				}
+
+				num_entries = 0;
+				areq->cipher_op_req.byteoffset = 0;
+
+				creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr
+					+ creq->vbuf.src[i].len;
+				creq->vbuf.src[i].len =	req.vbuf.src[i].len -
+							creq->vbuf.src[i].len;
+
+				req.vbuf.src[i].vaddr =
+						creq->vbuf.src[i].vaddr;
+				req.vbuf.src[i].len = creq->vbuf.src[i].len;
+
+				if (creq->vbuf.src[i].len == 0)
+					i++;
+			}
+
+			areq->cipher_op_req.byteoffset = 0;
+			max_data_xfer = QCE_MAX_OPER_DATA;
+			byteoffset = 0;
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+	} else
+		err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle,
+								k_align_src);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->vbuf.src[i].len = saved_req->vbuf.src[i].len;
+		creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr;
+	}
+	for (len = 0, i = 0; len < saved_req->data_len; i++) {
+		creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len;
+		creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr;
+		len += saved_req->vbuf.dst[i].len;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	creq->byteoffset = saved_req->byteoffset;
+
+	kzfree(saved_req);
+	kzfree(k_buf_src);
+	return err;
+
+}
+
+static int qcedev_check_cipher_key(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	/* if intending to use HW key make sure key fields are set
+	 * correctly and HW key is indeed supported in target
+	 */
+	if (req->encklen == 0) {
+		int i;
+
+		for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) {
+			if (req->enckey[i]) {
+				pr_err("%s: Invalid key: non-zero key input\n",
+								__func__);
+				goto error;
+			}
+		}
+		if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+			(req->op != QCEDEV_OPER_DEC_NO_KEY))
+			if (!podev->platform_support.hw_key_support) {
+				pr_err("%s: Invalid op %d\n", __func__,
+						(uint32_t)req->op);
+				goto error;
+			}
+	} else {
+		if (req->encklen == QCEDEV_AES_KEY_192) {
+			if (!podev->ce_support.aes_key_192) {
+				pr_err("%s: AES-192 not supported\n", __func__);
+				goto error;
+			}
+		} else {
+			/* if not using HW key make sure key
+			 * length is valid
+			 */
+			if (req->mode == QCEDEV_AES_MODE_XTS) {
+				if ((req->encklen != QCEDEV_AES_KEY_128*2) &&
+				(req->encklen != QCEDEV_AES_KEY_256*2)) {
+					pr_err("%s: unsupported key size: %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			} else {
+				if ((req->encklen != QCEDEV_AES_KEY_128) &&
+					(req->encklen != QCEDEV_AES_KEY_256)) {
+					pr_err("%s: unsupported key size %d\n",
+							__func__, req->encklen);
+					goto error;
+				}
+			}
+		}
+	}
+	return 0;
+error:
+	return -EINVAL;
+}
+
+static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if (req->use_pmem) {
+		pr_err("%s: Use of PMEM is not supported\n", __func__);
+		goto error;
+	}
+	if ((req->entries == 0) || (req->data_len == 0) ||
+			(req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid cipher length/entries\n", __func__);
+		goto error;
+	}
+	if ((req->alg >= QCEDEV_ALG_LAST) ||
+		(req->mode >= QCEDEV_AES_DES_MODE_LAST)) {
+		pr_err("%s: Invalid algorithm %d\n", __func__,
+						(uint32_t)req->alg);
+		goto error;
+	}
+	if ((req->mode == QCEDEV_AES_MODE_XTS) &&
+				(!podev->ce_support.aes_xts)) {
+		pr_err("%s: XTS algorithm is not supported\n", __func__);
+		goto error;
+	}
+	if (req->alg == QCEDEV_ALG_AES) {
+		if (qcedev_check_cipher_key(req, podev))
+			goto error;
+
+	}
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR) {
+			pr_err("%s: Operation on byte offset not supported\n",
+								 __func__);
+			goto error;
+		}
+		if (req->byteoffset >= AES_CE_BLOCK_SIZE) {
+			pr_err("%s: Invalid byte offset\n", __func__);
+			goto error;
+		}
+		total = req->byteoffset;
+		for (i = 0; i < req->entries; i++) {
+			if (total > U32_MAX - req->vbuf.src[i].len) {
+				pr_err("%s:Integer overflow on total src len\n",
+					__func__);
+				goto error;
+			}
+			total += req->vbuf.src[i].len;
+		}
+	}
+
+	if (req->data_len < req->byteoffset) {
+		pr_err("%s: req data length %u is less than byteoffset %u\n",
+				__func__, req->data_len, req->byteoffset);
+		goto error;
+	}
+
+	/* Ensure IV size */
+	if (req->ivlen > QCEDEV_MAX_IV_SIZE) {
+		pr_err("%s: ivlen is not correct: %u\n", __func__, req->ivlen);
+		goto error;
+	}
+
+	/* Ensure Key size */
+	if (req->encklen > QCEDEV_MAX_KEY_SIZE) {
+		pr_err("%s: Klen is not correct: %u\n", __func__, req->encklen);
+		goto error;
+	}
+
+	/* Ensure zero ivlen for ECB mode */
+	if (req->ivlen > 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+				(req->mode == QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a zero length IV\n", __func__);
+			goto error;
+		}
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+				(req->mode != QCEDEV_DES_MODE_ECB)) {
+			pr_err("%s: Expecting a non-zero ength IV\n", __func__);
+			goto error;
+		}
+	}
+	/* Check for sum of all dst length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+			pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.dst[i].len);
+			goto error;
+		}
+		if (req->vbuf.dst[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req dst vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.dst[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total (i=%d) dst(%d) buf size != data_len (%d)\n",
+			__func__, i, total, req->data_len);
+		goto error;
+	}
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+			pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+				__func__, i, req->vbuf.src[i].len);
+			goto error;
+		}
+		if (req->vbuf.src[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req src vbuf length\n",
+				__func__);
+			goto error;
+		}
+		total += req->vbuf.src[i].len;
+	}
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto error;
+	}
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+						struct qcedev_control *podev)
+{
+	uint32_t total = 0;
+	uint32_t i;
+
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+				(!podev->ce_support.cmac)) {
+		pr_err("%s: CMAC not supported\n", __func__);
+		goto sha_error;
+	}
+	if ((!req->entries) || (req->entries > QCEDEV_MAX_BUFFERS)) {
+		pr_err("%s: Invalid num entries (%d)\n",
+						__func__, req->entries);
+		goto sha_error;
+	}
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST) {
+		pr_err("%s: Invalid algorithm (%d)\n", __func__, req->alg);
+		goto sha_error;
+	}
+	if ((req->alg == QCEDEV_ALG_SHA1_HMAC) ||
+			(req->alg == QCEDEV_ALG_SHA256_HMAC)) {
+		if (req->authkey == NULL) {
+			pr_err("%s: Invalid authkey pointer\n", __func__);
+			goto sha_error;
+		}
+		if (req->authklen == 0) {
+			pr_err("%s: Invalid authkey length (%d)\n",
+						__func__, req->authklen);
+			goto sha_error;
+		}
+	}
+
+	if (req->alg == QCEDEV_ALG_AES_CMAC) {
+		if ((req->authklen != QCEDEV_AES_KEY_128) &&
+					(req->authklen != QCEDEV_AES_KEY_256)) {
+			pr_err("%s: unsupported key length\n", __func__);
+			goto sha_error;
+		}
+	}
+
+	/* Check for sum of all src length is equal to data_len  */
+	for (i = 0, total = 0; i < req->entries; i++) {
+		if (req->data[i].len > U32_MAX - total) {
+			pr_err("%s: Integer overflow on total req buf length\n",
+				__func__);
+			goto sha_error;
+		}
+		total += req->data[i].len;
+	}
+
+	if (total != req->data_len) {
+		pr_err("%s: Total src(%d) buf size != data_len (%d)\n",
+			__func__, total, req->data_len);
+		goto sha_error;
+	}
+	return 0;
+sha_error:
+	return -EINVAL;
+}
+
+static inline long qcedev_ioctl(struct file *file,
+				unsigned int cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req qcedev_areq;
+	struct qcedev_stat *pstat;
+
+	handle =  file->private_data;
+	podev =  handle->cntl;
+	qcedev_areq.handle = handle;
+	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. */
+	if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC)
+		return -ENOTTY;
+
+	init_completion(&qcedev_areq.complete);
+	pstat = &_qcedev_stat;
+
+	switch (cmd) {
+	case QCEDEV_IOCTL_ENC_REQ:
+	case QCEDEV_IOCTL_DEC_REQ:
+		if (copy_from_user(&qcedev_areq.cipher_op_req,
+				(void __user *)arg,
+				sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER;
+
+		if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req,
+				podev))
+			return -EINVAL;
+
+		err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle);
+		if (err)
+			return err;
+		if (copy_to_user((void __user *)arg,
+					&qcedev_areq.cipher_op_req,
+					sizeof(struct qcedev_cipher_op_req)))
+			return -EFAULT;
+		break;
+
+	case QCEDEV_IOCTL_SHA_INIT_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+		if (err)
+			return err;
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		handle->sha_ctxt.init_done = true;
+		break;
+	case QCEDEV_IOCTL_GET_CMAC_REQ:
+		if (!podev->ce_support.cmac)
+			return -ENOTTY;
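+		/* fall through */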
+	case QCEDEV_IOCTL_SHA_UPDATE_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+
+		if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) {
+			err = qcedev_hash_cmac(&qcedev_areq, handle, &sg_src);
+			if (err)
+				return err;
+		} else {
+			if (handle->sha_ctxt.init_done == false) {
+				pr_err("%s Init was not called\n", __func__);
+				return -EINVAL;
+			}
+			err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+			if (err)
+				return err;
+		}
+
+		if (handle->sha_ctxt.diglen > QCEDEV_MAX_SHA_DIGEST) {
+			pr_err("Invalid sha_ctxt.diglen %d\n",
+					handle->sha_ctxt.diglen);
+			return -EINVAL;
+		}
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		break;
+
+	case QCEDEV_IOCTL_SHA_FINAL_REQ:
+
+		if (handle->sha_ctxt.init_done == false) {
+			pr_err("%s Init was not called\n", __func__);
+			return -EINVAL;
+		}
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_final(&qcedev_areq, handle);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		handle->sha_ctxt.init_done = false;
+		break;
+
+	case QCEDEV_IOCTL_GET_SHA_REQ:
+		{
+		struct scatterlist sg_src;
+
+		if (copy_from_user(&qcedev_areq.sha_op_req,
+					(void __user *)arg,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev))
+			return -EINVAL;
+		qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA;
+		err = qcedev_hash_init(&qcedev_areq, handle, &sg_src);
+		if (err)
+			return err;
+		err = qcedev_hash_update(&qcedev_areq, handle, &sg_src);
+		if (err)
+			return err;
+		err = qcedev_hash_final(&qcedev_areq, handle);
+		if (err)
+			return err;
+		qcedev_areq.sha_op_req.diglen =	handle->sha_ctxt.diglen;
+		memcpy(&qcedev_areq.sha_op_req.digest[0],
+				&handle->sha_ctxt.digest[0],
+				handle->sha_ctxt.diglen);
+		if (copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req,
+					sizeof(struct qcedev_sha_op_req)))
+			return -EFAULT;
+		}
+		break;
+
+	default:
+		return -ENOTTY;
+	}
+
+	return err;
+}
+
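+/*
+ * Illustrative userspace sketch (not part of this driver, untested):
+ * computing a SHA-256 digest of one buffer through the ioctls handled
+ * above. The uapi header name, the device path, and the buf/len
+ * variables are assumptions; error handling and pointer casts elided.
+ *
+ *	#include <fcntl.h>
+ *	#include <sys/ioctl.h>
+ *	#include <linux/qcedev.h>
+ *
+ *	int fd = open("/dev/qcedev", O_RDWR);
+ *	struct qcedev_sha_op_req req = {0};
+ *
+ *	req.alg = QCEDEV_ALG_SHA256;
+ *	req.entries = 1;
+ *	req.data[0].vaddr = buf;
+ *	req.data[0].len = len;
+ *	req.data_len = len;
+ *	ioctl(fd, QCEDEV_IOCTL_SHA_INIT_REQ, &req);
+ *	ioctl(fd, QCEDEV_IOCTL_SHA_UPDATE_REQ, &req);
+ *	ioctl(fd, QCEDEV_IOCTL_SHA_FINAL_REQ, &req);
+ *	// req.digest[0..req.diglen) then holds the hash
+ */
+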
+static int qcedev_probe(struct platform_device *pdev)
+{
+	void *handle = NULL;
+	int rc = 0;
+	struct qcedev_control *podev;
+	struct msm_ce_hw_support *platform_support;
+
+	podev = &qce_dev[0];
+
+	podev->high_bw_req_count = 0;
+	INIT_LIST_HEAD(&podev->ready_commands);
+	podev->active_command = NULL;
+
+	spin_lock_init(&podev->lock);
+
+	tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev);
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	podev->qce = handle;
+	podev->pdev = pdev;
+	platform_set_drvdata(pdev, podev);
+
+	rc = misc_register(&podev->miscdevice);
+	qce_hw_support(podev->qce, &podev->ce_support);
+	if (podev->ce_support.bam) {
+		podev->platform_support.ce_shared = 0;
+		podev->platform_support.shared_ce_resource = 0;
+		podev->platform_support.hw_key_support =
+						podev->ce_support.hw_key;
+		podev->platform_support.bus_scale_table = NULL;
+		podev->platform_support.sha_hmac = 1;
+
+		podev->platform_support.bus_scale_table =
+			(struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+		if (!podev->platform_support.bus_scale_table)
+			pr_err("bus_scale_table is NULL\n");
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		podev->platform_support.ce_shared = platform_support->ce_shared;
+		podev->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		podev->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		podev->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		podev->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+	if (podev->platform_support.bus_scale_table != NULL) {
+		podev->bus_scale_handle =
+			msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+				podev->platform_support.bus_scale_table);
+		if (!podev->bus_scale_handle) {
+			pr_err("%s not able to get bus scale\n",
+				__func__);
+			rc =  -ENOMEM;
+			goto err;
+		}
+	}
+
+	if (rc >= 0)
+		return 0;
+
+	if (podev->platform_support.bus_scale_table != NULL)
+		msm_bus_scale_unregister_client(podev->bus_scale_handle);
+err:
+
+	if (handle)
+		qce_close(handle);
+	platform_set_drvdata(pdev, NULL);
+	podev->qce = NULL;
+	podev->pdev = NULL;
+	return rc;
+}
+
+static int qcedev_remove(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+
+	podev = platform_get_drvdata(pdev);
+	if (!podev)
+		return 0;
+	if (podev->qce)
+		qce_close(podev->qce);
+
+	if (podev->platform_support.bus_scale_table != NULL)
+		msm_bus_scale_unregister_client(podev->bus_scale_handle);
+
+	if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR)
+		misc_deregister(&podev->miscdevice);
+	tasklet_kill(&podev->done_tasklet);
+	return 0;
+}
+
+static int qcedev_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev || !podev->platform_support.bus_scale_table)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set to low bandwidth\n",
+						__func__);
+			goto suspend_exit;
+		}
+		ret = qce_disable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable disable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+			if (ret)
+				pr_err("%s Unable to set to high bandwidth\n",
+					__func__);
+			goto suspend_exit;
+		}
+	}
+
+suspend_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static int qcedev_resume(struct platform_device *pdev)
+{
+	struct qcedev_control *podev;
+	int ret;
+
+	podev = platform_get_drvdata(pdev);
+
+	if (!podev || !podev->platform_support.bus_scale_table)
+		return 0;
+
+	mutex_lock(&qcedev_sent_bw_req);
+	if (podev->high_bw_req_count) {
+		ret = qce_enable_clk(podev->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			goto resume_exit;
+		}
+		ret = msm_bus_scale_client_update_request(
+				podev->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			ret = qce_disable_clk(podev->qce);
+			if (ret)
+				pr_err("%s Unable enable clk\n",
+					__func__);
+			goto resume_exit;
+		}
+	}
+
+resume_exit:
+	mutex_unlock(&qcedev_sent_bw_req);
+	return 0;
+}
+
+static const struct of_device_id qcedev_match[] = {
+	{	.compatible = "qcom,qcedev",
+	},
+	{}
+};
+
+static struct platform_driver qcedev_plat_driver = {
+	.probe = qcedev_probe,
+	.remove = qcedev_remove,
+	.suspend = qcedev_suspend,
+	.resume = qcedev_resume,
+	.driver = {
+		.name = "qce",
+		.owner = THIS_MODULE,
+		.of_match_table = qcedev_match,
+	},
+};
+
+static int _disp_stats(int id)
+{
+	struct qcedev_stat *pstat;
+	int len = 0;
+
+	pstat = &_qcedev_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI QCE dev driver %d Statistics:\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation success       : %d\n",
+					pstat->qcedev_enc_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Encryption operation fail   : %d\n",
+					pstat->qcedev_enc_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation success     : %d\n",
+					pstat->qcedev_dec_success);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   Decryption operation fail          : %d\n",
+					pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	ssize_t rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	memset((char *)&_qcedev_stat, 0, sizeof(struct qcedev_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR_OR_NULL(_debug_dent)) {
+		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return _debug_dent ? PTR_ERR(_debug_dent) : -ENOMEM;
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", 1);
+	_debug_qcedev = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+			&_debug_qcedev, &_debug_stats_ops);
+	if (IS_ERR_OR_NULL(dent)) {
+		pr_err("qcedev debugfs_create_file fail, error %ld\n",
+				PTR_ERR(dent));
+		rc = dent ? PTR_ERR(dent) : -ENOMEM;
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	int rc;
+
+	rc = _qcedev_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI DEV Crypto driver");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcedevi.h b/drivers/crypto/msm/qcedevi.h
new file mode 100644
index 0000000..c26ed71
--- /dev/null
+++ b/drivers/crypto/msm/qcedevi.h
@@ -0,0 +1,125 @@
+/* QTI Crypto Driver
+ *
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __CRYPTO_MSM_QCEDEVI_H
+#define __CRYPTO_MSM_QCEDEVI_H
+
+#include <linux/interrupt.h>
+#include <linux/miscdevice.h>
+#include <crypto/hash.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/fips_status.h>
+#include "qce.h"
+
+#define CACHE_LINE_SIZE 32
+#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE
+
+enum qcedev_crypto_oper_type {
+	QCEDEV_CRYPTO_OPER_CIPHER = 0,
+	QCEDEV_CRYPTO_OPER_SHA = 1,
+	QCEDEV_CRYPTO_OPER_LAST
+};
+
+struct qcedev_handle;
+
+struct qcedev_cipher_req {
+	struct ablkcipher_request creq;
+	void *cookie;
+};
+
+struct qcedev_sha_req {
+	struct ahash_request sreq;
+	void *cookie;
+};
+
+struct	qcedev_sha_ctxt {
+	uint32_t	auth_data[4];
+	uint8_t	digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t	diglen;
+	uint8_t	trailing_buf[64];
+	uint32_t	trailing_buf_len;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	authkey[QCEDEV_MAX_SHA_BLOCK_SIZE];
+	bool		init_done;
+};
+
+struct qcedev_async_req {
+	struct list_head			list;
+	struct completion			complete;
+	enum qcedev_crypto_oper_type		op_type;
+	union {
+		struct qcedev_cipher_op_req	cipher_op_req;
+		struct qcedev_sha_op_req	sha_op_req;
+	};
+
+	union {
+		struct qcedev_cipher_req	cipher_req;
+		struct qcedev_sha_req		sha_req;
+	};
+	struct qcedev_handle			*handle;
+	int					err;
+};
+
+/**********************************************************************
+ * Register ourselves as a misc device to be able to access the dev driver
+ * from userspace.
+ */
+
+#define QCEDEV_DEV	"qcedev"
+
+struct qcedev_control {
+
+	/* CE features supported by platform */
+	struct msm_ce_hw_support platform_support;
+
+	uint32_t ce_lock_count;
+	uint32_t high_bw_req_count;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	uint32_t  bus_scale_handle;
+
+	/* misc device */
+	struct miscdevice miscdevice;
+
+	/* qce handle */
+	void *qce;
+
+	/* platform device */
+	struct platform_device *pdev;
+
+	unsigned int magic;
+
+	struct list_head ready_commands;
+	struct qcedev_async_req *active_command;
+	spinlock_t lock;
+	struct tasklet_struct done_tasklet;
+};
+
+struct qcedev_handle {
+	/* qcedev control handle */
+	struct qcedev_control *cntl;
+	/* qce internal sha context*/
+	struct qcedev_sha_ctxt sha_ctxt;
+};
+
+void qcedev_cipher_req_cb(void *cookie, unsigned char *icv,
+	unsigned char *iv, int ret);
+
+void qcedev_sha_req_cb(void *cookie, unsigned char *digest,
+	unsigned char *authdata, int ret);
+
+#endif  /* __CRYPTO_MSM_QCEDEVI_H */
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
new file mode 100644
index 0000000..f184ee1
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto.c
@@ -0,0 +1,5515 @@
+/*
+ * QTI Crypto driver
+ *
+ * Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/crypto.h>
+#include <linux/kernel.h>
+#include <linux/rtnetlink.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+#include <linux/debugfs.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/platform_data/qcom_crypto_device.h>
+#include <linux/msm-bus.h>
+#include <linux/hardirq.h>
+#include <linux/qcrypto.h>
+
+#include <crypto/ctr.h>
+#include <crypto/des.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+#include <crypto/hash.h>
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/scatterwalk.h>
+#include <crypto/skcipher.h>
+#include <crypto/internal/skcipher.h>
+#include <crypto/internal/hash.h>
+#include <crypto/internal/aead.h>
+
+#include <linux/fips_status.h>
+
+#include "qce.h"
+
+#define DEBUG_MAX_FNAME  16
+#define DEBUG_MAX_RW_BUF 4096
+#define QCRYPTO_BIG_NUMBER 9999999 /* a big number */
+
+/*
+ * For crypto 5.0 which has burst size alignment requirement.
+ */
+#define MAX_ALIGN_SIZE  0x40
+
+#define QCRYPTO_HIGH_BANDWIDTH_TIMEOUT 1000
+
+/* Status of response workq */
+enum resp_workq_sts {
+	NOT_SCHEDULED  = 0,
+	IS_SCHEDULED   = 1,
+	SCHEDULE_AGAIN = 2
+};
+
+/* Status of req processing by CEs */
+enum req_processing_sts {
+	STOPPED     = 0,
+	IN_PROGRESS = 1
+};
+
+enum qcrypto_bus_state {
+	BUS_NO_BANDWIDTH = 0,
+	BUS_HAS_BANDWIDTH,
+	BUS_BANDWIDTH_RELEASING,
+	BUS_BANDWIDTH_ALLOCATING,
+	BUS_SUSPENDED,
+	BUS_SUSPENDING,
+};
+
+struct crypto_stat {
+	u64 aead_sha1_aes_enc;
+	u64 aead_sha1_aes_dec;
+	u64 aead_sha1_des_enc;
+	u64 aead_sha1_des_dec;
+	u64 aead_sha1_3des_enc;
+	u64 aead_sha1_3des_dec;
+	u64 aead_sha256_aes_enc;
+	u64 aead_sha256_aes_dec;
+	u64 aead_sha256_des_enc;
+	u64 aead_sha256_des_dec;
+	u64 aead_sha256_3des_enc;
+	u64 aead_sha256_3des_dec;
+	u64 aead_ccm_aes_enc;
+	u64 aead_ccm_aes_dec;
+	u64 aead_rfc4309_ccm_aes_enc;
+	u64 aead_rfc4309_ccm_aes_dec;
+	u64 aead_op_success;
+	u64 aead_op_fail;
+	u64 aead_bad_msg;
+	u64 ablk_cipher_aes_enc;
+	u64 ablk_cipher_aes_dec;
+	u64 ablk_cipher_des_enc;
+	u64 ablk_cipher_des_dec;
+	u64 ablk_cipher_3des_enc;
+	u64 ablk_cipher_3des_dec;
+	u64 ablk_cipher_op_success;
+	u64 ablk_cipher_op_fail;
+	u64 sha1_digest;
+	u64 sha256_digest;
+	u64 sha1_hmac_digest;
+	u64 sha256_hmac_digest;
+	u64 ahash_op_success;
+	u64 ahash_op_fail;
+};
+static struct crypto_stat _qcrypto_stat;
+static struct dentry *_debug_dent;
+static char _debug_read_buf[DEBUG_MAX_RW_BUF];
+static bool _qcrypto_init_assign;
+struct crypto_priv;
+struct qcrypto_req_control {
+	unsigned int index;
+	bool in_use;
+	struct crypto_engine *pce;
+	struct crypto_async_request *req;
+	struct qcrypto_resp_ctx *arsp;
+	int res; /* execution result */
+};
+
+struct crypto_engine {
+	struct list_head elist;
+	void *qce; /* qce handle */
+	struct platform_device *pdev; /* platform device */
+	struct crypto_priv *pcp;
+	uint32_t  bus_scale_handle;
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that have this engine assigned,
+					 * waiting to be executed
+					 */
+	u64 total_req;
+	u64 err_req;
+	u32 unit;
+	u32 ce_device;
+	u32 ce_hw_instance;
+	unsigned int signature;
+
+	enum qcrypto_bus_state bw_state;
+	bool   high_bw_req;
+	struct timer_list bw_reaper_timer;
+	struct work_struct bw_reaper_ws;
+	struct work_struct bw_allocate_ws;
+
+	/* engine execution sequence number */
+	u32    active_seq;
+	/* last QCRYPTO_HIGH_BANDWIDTH_TIMEOUT active_seq */
+	u32    last_active_seq;
+
+	bool   check_flag;
+	/*Added to support multi-requests*/
+	unsigned int max_req;
+	struct   qcrypto_req_control *preq_pool;
+	atomic_t req_count;
+	bool issue_req;		/* a request is being issued to qce */
+	bool first_engine;	/* this engine is the first engine or not */
+	unsigned int irq_cpu;	/* the cpu running the irq of this engine */
+	unsigned int max_req_used; /* debug stats */
+};
+
+#define MAX_SMP_CPU    8
+
+struct crypto_priv {
+	/* CE features supported by target device*/
+	struct msm_ce_hw_support platform_support;
+
+	/* CE features/algorithms supported by HW engine*/
+	struct ce_hw_support ce_support;
+
+	/* the lock protects crypto queue and req */
+	spinlock_t lock;
+
+	/* list of  registered algorithms */
+	struct list_head alg_list;
+
+	/* current active request */
+	struct crypto_async_request *req;
+
+	struct work_struct unlock_ce_ws;
+	struct list_head engine_list; /* list of  qcrypto engines */
+	int32_t total_units;   /* total units of engines */
+	struct mutex engine_lock;
+
+	struct crypto_engine *next_engine; /* next assign engine */
+	struct crypto_queue req_queue;	/*
+					 * request queue for those requests
+					 * that are waiting for an available
+					 * engine.
+					 */
+	struct llist_head ordered_resp_list;	/* Queue to maintain
+						 * responses in sequence.
+						 */
+	atomic_t resp_cnt;
+	struct workqueue_struct *resp_wq;
+	struct work_struct resp_work;	/*
+					 * Workq to send responses
+					 * in sequence.
+					 */
+	enum resp_workq_sts sched_resp_workq_status;
+	enum req_processing_sts ce_req_proc_sts;
+	int cpu_getting_irqs_frm_first_ce;
+	struct crypto_engine *first_engine;
+	struct crypto_engine *scheduled_eng; /* last engine scheduled */
+
+	/* debug stats */
+	unsigned int no_avail;
+	unsigned int resp_stop;
+	unsigned int resp_start;
+	unsigned int max_qlen;
+	unsigned int queue_work_eng3;
+	unsigned int queue_work_not_eng3;
+	unsigned int queue_work_not_eng3_nz;
+	unsigned int max_resp_qlen;
+	unsigned int max_reorder_cnt;
+	unsigned int cpu_req[MAX_SMP_CPU+1];
+};
+static struct crypto_priv qcrypto_dev;
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp);
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp);
+static struct qcrypto_req_control *qcrypto_alloc_req_control(
+						struct crypto_engine *pce)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+	unsigned int req_count;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (xchg(&pqcrypto_req_control->in_use, true) == false) {
+			req_count = atomic_inc_return(&pce->req_count);
+			if (req_count > pce->max_req_used)
+				pce->max_req_used = req_count;
+			return pqcrypto_req_control;
+		}
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
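+
+/*
+ * Note: the request pool above is lock-free; xchg(&in_use, true) claims
+ * a slot atomically, so two contexts cannot win the same entry, and
+ * req_count/max_req_used track current and peak usage.
+ */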
+
+static void qcrypto_free_req_control(struct crypto_engine *pce,
+					struct qcrypto_req_control *preq)
+{
+	/* do this before free req */
+	preq->req = NULL;
+	preq->arsp = NULL;
+	/* free req */
+	if (xchg(&preq->in_use, false) == false)
+		pr_warn("request info %p free already\n", preq);
+	else
+		atomic_dec(&pce->req_count);
+}
+
+static struct qcrypto_req_control *find_req_control_for_areq(
+					struct crypto_engine *pce,
+					struct crypto_async_request *areq)
+{
+	int i;
+	struct qcrypto_req_control *pqcrypto_req_control = pce->preq_pool;
+
+	for (i = 0; i < pce->max_req; i++) {
+		if (pqcrypto_req_control->req == areq)
+			return pqcrypto_req_control;
+		pqcrypto_req_control++;
+	}
+	return NULL;
+}
+
+static void qcrypto_init_req_control(struct crypto_engine *pce,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	int i;
+
+	pce->preq_pool = pqcrypto_req_control;
+	atomic_set(&pce->req_count, 0);
+	for (i = 0; i < pce->max_req; i++) {
+		pqcrypto_req_control->index = i;
+		pqcrypto_req_control->in_use = false;
+		pqcrypto_req_control->pce = pce;
+		pqcrypto_req_control++;
+	}
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device(struct crypto_priv *cp,
+			 unsigned int device)
+{
+	struct crypto_engine *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		if (entry->ce_device == device)
+			break;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (((entry != NULL) && (entry->ce_device != device)) ||
+		(entry == NULL)) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n",
+				device);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static struct crypto_engine *_qrypto_find_pengine_device_hw
+			(struct crypto_priv *cp,
+			u32 device,
+			u32 hw_instance)
+{
+	struct crypto_engine *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		if ((entry->ce_device == device) &&
+			(entry->ce_hw_instance == hw_instance))
+			break;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	if (((entry != NULL) &&
+		((entry->ce_device != device)
+		|| (entry->ce_hw_instance != hw_instance)))
+		|| (entry == NULL)) {
+		pr_err("Device node for CE device %d NOT FOUND!!\n",
+						 device);
+		return NULL;
+	}
+	return entry;
+}
+
+int qcrypto_get_num_engines(void)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	int count = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		count++;
+	}
+	return count;
+}
+EXPORT_SYMBOL(qcrypto_get_num_engines);
+
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr)
+{
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *entry = NULL;
+	size_t arr_index = 0;
+
+	list_for_each_entry(entry, &cp->engine_list, elist) {
+		arr[arr_index].ce_device = entry->ce_device;
+		arr[arr_index].hw_instance = entry->ce_hw_instance;
+		arr_index++;
+		if (arr_index >= num_engines)
+			break;
+	}
+}
+EXPORT_SYMBOL(qcrypto_get_engine_list);
+
+enum qcrypto_alg_type {
+	QCRYPTO_ALG_CIPHER	= 0,
+	QCRYPTO_ALG_SHA	= 1,
+	QCRYPTO_ALG_AEAD = 2,
+	QCRYPTO_ALG_LAST
+};
+
+struct qcrypto_alg {
+	struct list_head entry;
+	struct crypto_alg cipher_alg;
+	struct ahash_alg sha_alg;
+	struct aead_alg aead_alg;
+	enum qcrypto_alg_type alg_type;
+	struct crypto_priv *cp;
+};
+
+#define QCRYPTO_MAX_KEY_SIZE	64
+/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */
+#define QCRYPTO_MAX_IV_LENGTH	16
+
+#define	QCRYPTO_CCM4309_NONCE_LEN	3
+
+struct qcrypto_cipher_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+
+	enum qce_hash_alg_enum  auth_alg; /* for aead */
+	u8 auth_key[QCRYPTO_MAX_KEY_SIZE];
+	u8 iv[QCRYPTO_MAX_IV_LENGTH];
+
+	u8 enc_key[QCRYPTO_MAX_KEY_SIZE];
+	unsigned int enc_key_len;
+
+	unsigned int authsize;
+	unsigned int auth_key_len;
+
+	u8 ccm4309_nonce[QCRYPTO_CCM4309_NONCE_LEN];
+
+	struct crypto_skcipher *cipher_aes192_fb;
+
+	struct crypto_ahash *ahash_aead_aes192_fb;
+};
+
+struct qcrypto_resp_ctx {
+	struct list_head list;
+	struct llist_node llist;
+	struct crypto_async_request *async_req; /* async req */
+	int res;                                /* execution result */
+};
+
+struct qcrypto_cipher_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+	u8 *iv;
+	u8 rfc4309_iv[QCRYPTO_MAX_IV_LENGTH];
+	unsigned int ivsize;
+	int  aead;
+	struct scatterlist asg;		/* Formatted associated data sg  */
+	unsigned char *adata;		/* Pointer to formatted assoc data */
+	enum qce_cipher_alg_enum alg;
+	enum qce_cipher_dir_enum dir;
+	enum qce_cipher_mode_enum mode;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist *orig_dst;	/* Original dst sg ptr  */
+	struct scatterlist dsg;		/* Dest Data sg  */
+	struct scatterlist ssg;		/* Source Data sg  */
+	unsigned char *data;		/* Incoming data pointer*/
+
+	struct aead_request *aead_req;
+	struct ahash_request *fb_hash_req;
+	uint8_t	fb_ahash_digest[SHA256_DIGEST_SIZE];
+	struct scatterlist fb_ablkcipher_src_sg[2];
+	struct scatterlist fb_ablkcipher_dst_sg[2];
+	char *fb_aes_iv;
+	unsigned int  fb_ahash_length;
+	struct skcipher_request *fb_aes_req;
+	struct scatterlist *fb_aes_src;
+	struct scatterlist *fb_aes_dst;
+	unsigned int  fb_aes_cryptlen;
+};
+
+#define SHA_MAX_BLOCK_SIZE      SHA256_BLOCK_SIZE
+#define SHA_MAX_STATE_SIZE	(SHA256_DIGEST_SIZE / sizeof(u32))
+#define SHA_MAX_DIGEST_SIZE	 SHA256_DIGEST_SIZE
+
+#define	MSM_QCRYPTO_REQ_QUEUE_LENGTH 768
+#define	COMPLETION_CB_BACKLOG_LENGTH_STOP 400
+#define	COMPLETION_CB_BACKLOG_LENGTH_START \
+			(COMPLETION_CB_BACKLOG_LENGTH_STOP / 2)
+
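+/* standard initialization vector for SHA-1, source: FIPS 180-2 */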
+static uint8_t  _std_init_vector_sha1_uint8[] =   {
+	0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89,
+	0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76,
+	0xC3, 0xD2, 0xE1, 0xF0
+};
+
+/* standard initialization vector for SHA-256, source: FIPS 180-2 */
+static uint8_t _std_init_vector_sha256_uint8[] = {
+	0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85,
+	0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A,
+	0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C,
+	0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19
+};
+
+struct qcrypto_sha_ctx {
+	struct list_head rsp_queue;     /* response queue */
+	struct crypto_engine *pengine;  /* fixed engine assigned to this tfm */
+	struct crypto_priv *cp;
+	unsigned int flags;
+	enum qce_hash_alg_enum  alg;
+	uint32_t		diglen;
+	uint32_t		authkey_in_len;
+	uint8_t			authkey[SHA_MAX_BLOCK_SIZE];
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+};
+
+struct qcrypto_sha_req_ctx {
+	struct qcrypto_resp_ctx rsp_entry;/* rsp entry. */
+	struct crypto_engine *pengine;  /* engine assigned to this request */
+
+	struct scatterlist *src;
+	uint32_t nbytes;
+
+	struct scatterlist *orig_src;	/* Original src sg ptr  */
+	struct scatterlist dsg;		/* Data sg */
+	unsigned char *data;		/* Incoming data pointer*/
+	unsigned char *data2;		/* Updated data pointer*/
+
+	uint32_t byte_count[4];
+	u64 count;
+	uint8_t	first_blk;
+	uint8_t	last_blk;
+	uint8_t	 trailing_buf[SHA_MAX_BLOCK_SIZE];
+	uint32_t trailing_buf_len;
+
+	/* dma buffer, Internal use */
+	uint8_t	staging_dmabuf
+		[SHA_MAX_BLOCK_SIZE+SHA_MAX_DIGEST_SIZE+MAX_ALIGN_SIZE];
+
+	uint8_t	digest[SHA_MAX_DIGEST_SIZE];
+	struct scatterlist sg[2];
+};
+
+static void _byte_stream_to_words(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n;
+
+	n = len  / sizeof(uint32_t);
+	for (; n > 0; n--) {
+		*iv =  ((*b << 24)      & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00)     |
+				(*(b+3)          & 0xff);
+		b += sizeof(uint32_t);
+		iv++;
+	}
+
+	n = len %  sizeof(uint32_t);
+	if (n == 3) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000)   |
+				(((*(b+2)) << 8) & 0xff00);
+	} else if (n == 2) {
+		*iv = ((*b << 24) & 0xff000000) |
+				(((*(b+1)) << 16) & 0xff0000);
+	} else if (n == 1) {
+		*iv = ((*b << 24) & 0xff000000);
+	}
+}
+
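+/*
+ * Example: _byte_stream_to_words() above packs bytes big-endian
+ * ({0x01, 0x02, 0x03, 0x04} becomes the word 0x01020304);
+ * _words_to_byte_stream() below is its exact inverse.
+ */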
+static void _words_to_byte_stream(uint32_t *iv, unsigned char *b,
+		unsigned int len)
+{
+	unsigned int n = len  / sizeof(uint32_t);
+
+	for (; n > 0; n--) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 8)    & 0xff);
+		*b++ = (unsigned char) (*iv           & 0xff);
+		iv++;
+	}
+	n = len % sizeof(uint32_t);
+	if (n == 3) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b++ = (unsigned char) ((*iv >> 16)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 8)    & 0xff);
+	} else if (n == 2) {
+		*b++ = (unsigned char) ((*iv >> 24)   & 0xff);
+		*b =   (unsigned char) ((*iv >> 16)   & 0xff);
+	} else if (n == 1) {
+		*b =   (unsigned char) ((*iv >> 24)   & 0xff);
+	}
+}
+
+static void qcrypto_ce_set_bus(struct crypto_engine *pengine,
+				 bool high_bw_req)
+{
+	int ret = 0;
+
+	if (high_bw_req) {
+		ret = qce_enable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable enable clk\n", __func__);
+			goto clk_err;
+		}
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+		if (ret) {
+			pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			qce_disable_clk(pengine->qce);
+			goto clk_err;
+		}
+	} else {
+		ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 0);
+		if (ret) {
+			pr_err("%s Unable to set to low bandwidth\n",
+						__func__);
+			goto clk_err;
+		}
+		ret = qce_disable_clk(pengine->qce);
+		if (ret) {
+			pr_err("%s Unable disable clk\n", __func__);
+			ret = msm_bus_scale_client_update_request(
+				pengine->bus_scale_handle, 1);
+			if (ret)
+				pr_err("%s Unable to set to high bandwidth\n",
+						__func__);
+			goto clk_err;
+		}
+	}
+clk_err:
+	return;
+}
+
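+/*
+ * Bandwidth reaper: a periodic timer (QCRYPTO_HIGH_BANDWIDTH_TIMEOUT ms)
+ * schedules bw_reaper_ws, which compares active_seq with the value seen
+ * on the previous tick; if the engine has been idle for a full interval
+ * and has no pending requests, its bus vote and clocks are dropped.
+ */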
+static void qcrypto_bw_reaper_timer_callback(unsigned long data)
+{
+	struct crypto_engine *pengine = (struct crypto_engine *)data;
+
+	schedule_work(&pengine->bw_reaper_ws);
+}
+
+static void qcrypto_bw_set_timeout(struct crypto_engine *pengine)
+{
+	pengine->bw_reaper_timer.data =
+			(unsigned long)(pengine);
+	pengine->bw_reaper_timer.expires = jiffies +
+			msecs_to_jiffies(QCRYPTO_HIGH_BANDWIDTH_TIMEOUT);
+	mod_timer(&(pengine->bw_reaper_timer),
+		pengine->bw_reaper_timer.expires);
+}
+
+static void qcrypto_ce_bw_allocate_req(struct crypto_engine *pengine)
+{
+	schedule_work(&pengine->bw_allocate_ws);
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+					struct crypto_engine *pengine);
+
+static void qcrypto_bw_allocate_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_allocate_ws);
+	unsigned long flags;
+	struct crypto_priv *cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qcrypto_ce_set_bus(pengine, true);
+	qcrypto_bw_set_timeout(pengine);
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->bw_state = BUS_HAS_BANDWIDTH;
+	pengine->high_bw_req = false;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	_start_qcrypto_process(cp, pengine);
+}
+
+static void qcrypto_bw_reaper_work(struct work_struct *work)
+{
+	struct  crypto_engine *pengine = container_of(work,
+				struct crypto_engine, bw_reaper_ws);
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	u32    active_seq;
+	bool restart = false;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	active_seq = pengine->active_seq;
+	if (pengine->bw_state == BUS_HAS_BANDWIDTH &&
+		(active_seq == pengine->last_active_seq)) {
+
+		/* check if engine is stuck */
+		if (atomic_read(&pengine->req_count) > 0) {
+			if (pengine->check_flag)
+				dev_warn(&pengine->pdev->dev,
+				"The engine appears to be stuck seq %d.\n",
+				active_seq);
+			pengine->check_flag = false;
+			goto ret;
+		}
+		if (cp->platform_support.bus_scale_table == NULL)
+			goto ret;
+		pengine->bw_state = BUS_BANDWIDTH_RELEASING;
+		spin_unlock_irqrestore(&cp->lock, flags);
+
+		qcrypto_ce_set_bus(pengine, false);
+
+		spin_lock_irqsave(&cp->lock, flags);
+
+		if (pengine->high_bw_req == true) {
+			/* we got request while we are disabling clock */
+			pengine->bw_state = BUS_BANDWIDTH_ALLOCATING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+
+			qcrypto_ce_set_bus(pengine, true);
+
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_HAS_BANDWIDTH;
+			pengine->high_bw_req = false;
+			restart = true;
+		} else
+			pengine->bw_state = BUS_NO_BANDWIDTH;
+	}
+ret:
+	pengine->last_active_seq = active_seq;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (restart)
+		_start_qcrypto_process(cp, pengine);
+	if (pengine->bw_state != BUS_NO_BANDWIDTH)
+		qcrypto_bw_set_timeout(pengine);
+}
+
+static int qcrypto_count_sg(struct scatterlist *sg, int nbytes)
+{
+	int i;
+
+	for (i = 0; nbytes > 0 && sg != NULL; i++, sg = sg_next(sg))
+		nbytes -= sg->length;
+
+	return i;
+}
+
+static size_t qcrypto_sg_copy_from_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_from_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static size_t qcrypto_sg_copy_to_buffer(struct scatterlist *sgl,
+				unsigned int nents, void *buf, size_t buflen)
+{
+	int i;
+	size_t offset, len;
+
+	for (i = 0, offset = 0; i < nents; ++i) {
+		len = sg_copy_to_buffer(sgl, 1, buf, buflen);
+		buf += len;
+		buflen -= len;
+		offset += len;
+		sgl = sg_next(sgl);
+	}
+
+	return offset;
+}
+
+static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp,
+		struct ahash_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_SHA;
+	q_alg->sha_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp,
+		struct crypto_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_CIPHER;
+	q_alg->cipher_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static struct qcrypto_alg *_qcrypto_aead_alg_alloc(struct crypto_priv *cp,
+		struct aead_alg *template)
+{
+	struct qcrypto_alg *q_alg;
+
+	q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL);
+	if (!q_alg)
+		return ERR_PTR(-ENOMEM);
+
+	q_alg->alg_type = QCRYPTO_ALG_AEAD;
+	q_alg->aead_alg = *template;
+	q_alg->cp = cp;
+
+	return q_alg;
+}
+
+static int _qcrypto_cipher_ctx_init(struct qcrypto_cipher_ctx *ctx,
+					struct qcrypto_alg *q_alg)
+{
+	if (!ctx || !q_alg) {
+		pr_err("ctx or q_alg is NULL\n");
+		return -EINVAL;
+	}
+	ctx->flags = 0;
+	/* update context with ptr to cp */
+	ctx->cp = q_alg->cp;
+	/* random first IV */
+	get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH);
+	if (_qcrypto_init_assign) {
+		ctx->pengine = _qcrypto_static_assign_engine(ctx->cp);
+		if (ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		ctx->pengine = NULL;
+	INIT_LIST_HEAD(&ctx->rsp_queue);
+	ctx->auth_alg = QCE_HASH_LAST;
+	return 0;
+}
+
+static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_alg *alg = tfm->__crt_alg;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	q_alg = container_of(alg, struct qcrypto_alg, cipher_alg);
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	struct ahash_alg *alg =	container_of(crypto_hash_alg_common(ahash),
+						struct ahash_alg, halg);
+	struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg,
+								sha_alg);
+
+	crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx));
+	/* update context with ptr to cp */
+	sha_ctx->cp = q_alg->cp;
+	sha_ctx->flags = 0;
+	sha_ctx->ahash_req = NULL;
+	if (_qcrypto_init_assign) {
+		sha_ctx->pengine = _qcrypto_static_assign_engine(sha_ctx->cp);
+		if (sha_ctx->pengine == NULL)
+			return -ENODEV;
+	} else
+		sha_ctx->pengine = NULL;
+	INIT_LIST_HEAD(&sha_ctx->rsp_queue);
+	return 0;
+}
+
+static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+
+	if (!list_empty(&sha_ctx->rsp_queue))
+		pr_err("_qcrypto_ahash_cra_exit: requests still outstanding");
+	if (sha_ctx->ahash_req != NULL) {
+		ahash_request_free(sha_ctx->ahash_req);
+		sha_ctx->ahash_req = NULL;
+	}
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err);
+
+static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm)
+{
+	struct crypto_ahash *ahash = __crypto_ahash_cast(tfm);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm);
+	int ret = 0;
+
+	ret = _qcrypto_ahash_cra_init(tfm);
+	if (ret)
+		return ret;
+	sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL);
+
+	if (sha_ctx->ahash_req == NULL) {
+		_qcrypto_ahash_cra_exit(tfm);
+		return -ENOMEM;
+	}
+
+	init_completion(&sha_ctx->ahash_req_complete);
+	ahash_request_set_callback(sha_ctx->ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&sha_ctx->ahash_req_complete);
+	crypto_ahash_clear_flags(ahash, ~0);
+
+	return 0;
+}
+
+static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx);
+	return _qcrypto_cipher_cra_init(tfm);
+}
+
+static int _qcrypto_cra_aes_ablkcipher_init(struct crypto_tfm *tfm)
+{
+	const char *name = tfm->__crt_alg->cra_name;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+		ctx->cipher_aes192_fb = NULL;
+		return _qcrypto_cra_ablkcipher_init(tfm);
+	}
+	ctx->cipher_aes192_fb = crypto_alloc_skcipher(name, 0,
+			CRYPTO_ALG_ASYNC | CRYPTO_ALG_NEED_FALLBACK);
+	if (IS_ERR(ctx->cipher_aes192_fb)) {
+		pr_err("Error allocating fallback algo %s\n", name);
+		ret = PTR_ERR(ctx->cipher_aes192_fb);
+		ctx->cipher_aes192_fb = NULL;
+		return ret;
+	}
+	return _qcrypto_cra_ablkcipher_init(tfm);
+}
+
+static int _qcrypto_aead_cra_init(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct aead_alg *aeadalg = crypto_aead_alg(tfm);
+	struct qcrypto_alg *q_alg = container_of(aeadalg, struct qcrypto_alg,
+						aead_alg);
+	return _qcrypto_cipher_ctx_init(ctx, q_alg);
+}
+
+static int _qcrypto_cra_aead_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_ccm_init(struct  crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg =  QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_rfc4309_ccm_init(struct  crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	ctx->auth_alg =  QCE_HASH_AES_CMAC;
+	return rc;
+}
+
+static int _qcrypto_cra_aead_aes_sha1_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_skcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha1)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_skcipher(ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA1_HMAC;
+	return 0;
+}
+
+static int _qcrypto_cra_aead_aes_sha256_init(struct crypto_aead *tfm)
+{
+	int rc;
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct crypto_priv *cp = &qcrypto_dev;
+
+	crypto_aead_set_reqsize(tfm, sizeof(struct qcrypto_cipher_req_ctx));
+	rc = _qcrypto_aead_cra_init(tfm);
+	if (rc)
+		return rc;
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+	if (!cp->ce_support.aes_key_192) {
+		ctx->cipher_aes192_fb = crypto_alloc_skcipher(
+							"cbc(aes)", 0, 0);
+		if (IS_ERR(ctx->cipher_aes192_fb)) {
+			ctx->cipher_aes192_fb = NULL;
+		} else {
+			ctx->ahash_aead_aes192_fb = crypto_alloc_ahash(
+							"hmac(sha256)", 0, 0);
+			if (IS_ERR(ctx->ahash_aead_aes192_fb)) {
+				ctx->ahash_aead_aes192_fb = NULL;
+				crypto_free_skcipher(ctx->cipher_aes192_fb);
+				ctx->cipher_aes192_fb = NULL;
+			}
+		}
+	}
+	ctx->auth_alg = QCE_HASH_SHA256_HMAC;
+	return 0;
+}
+
+static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("%s: requests still outstanding\n", __func__);
+}
+
+static void _qcrypto_cra_aes_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	_qcrypto_cra_ablkcipher_exit(tfm);
+	if (ctx->cipher_aes192_fb)
+		crypto_free_skcipher(ctx->cipher_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+}
+
+static void _qcrypto_cra_aead_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("%s: requests still outstanding\n", __func__);
+}
+
+static void _qcrypto_cra_aead_aes_exit(struct crypto_aead *tfm)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+
+	if (!list_empty(&ctx->rsp_queue))
+		pr_err("%s: requests still outstanding\n", __func__);
+	if (ctx->cipher_aes192_fb)
+		crypto_free_skcipher(ctx->cipher_aes192_fb);
+	if (ctx->ahash_aead_aes192_fb)
+		crypto_free_ahash(ctx->ahash_aead_aes192_fb);
+	ctx->cipher_aes192_fb = NULL;
+	ctx->ahash_aead_aes192_fb = NULL;
+}
+
+static int _disp_stats(int id)
+{
+	struct crypto_stat *pstat;
+	int len = 0;
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	pstat = &_qcrypto_stat;
+	len = scnprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQTI crypto accelerator %d Statistics\n",
+				id + 1);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER AES encryption          : %llu\n",
+					pstat->ablk_cipher_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER AES decryption          : %llu\n",
+					pstat->ablk_cipher_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER DES encryption          : %llu\n",
+					pstat->ablk_cipher_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER DES decryption          : %llu\n",
+					pstat->ablk_cipher_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER 3DES encryption         : %llu\n",
+					pstat->ablk_cipher_3des_enc);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER 3DES decryption         : %llu\n",
+					pstat->ablk_cipher_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation success       : %llu\n",
+					pstat->ablk_cipher_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   ABLK CIPHER operation fail          : %llu\n",
+					pstat->ablk_cipher_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES encryption            : %llu\n",
+					pstat->aead_sha1_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-AES decryption            : %llu\n",
+					pstat->aead_sha1_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES encryption            : %llu\n",
+					pstat->aead_sha1_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-DES decryption            : %llu\n",
+					pstat->aead_sha1_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES encryption           : %llu\n",
+					pstat->aead_sha1_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA1-3DES decryption           : %llu\n",
+					pstat->aead_sha1_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES encryption          : %llu\n",
+					pstat->aead_sha256_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-AES decryption          : %llu\n",
+					pstat->aead_sha256_aes_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES encryption          : %llu\n",
+					pstat->aead_sha256_des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-DES decryption          : %llu\n",
+					pstat->aead_sha256_des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES encryption         : %llu\n",
+					pstat->aead_sha256_3des_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD SHA256-3DES decryption         : %llu\n",
+					pstat->aead_sha256_3des_dec);
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES encryption             : %llu\n",
+					pstat->aead_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD CCM-AES decryption             : %llu\n",
+					pstat->aead_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES encryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_enc);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD RFC4309-CCM-AES decryption     : %llu\n",
+					pstat->aead_rfc4309_ccm_aes_dec);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation success              : %llu\n",
+					pstat->aead_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD operation fail                 : %llu\n",
+					pstat->aead_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AEAD bad message                    : %llu\n",
+					pstat->aead_bad_msg);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 digest                   : %llu\n",
+					pstat->sha1_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 digest                 : %llu\n",
+					pstat->sha256_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA1 HMAC digest              : %llu\n",
+					pstat->sha1_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH SHA256 HMAC digest            : %llu\n",
+					pstat->sha256_hmac_digest);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation success             : %llu\n",
+					pstat->ahash_op_success);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   AHASH operation fail                : %llu\n",
+					pstat->ahash_op_fail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   resp start, resp stop, max rsp queue reorder-cnt : %u %u %u %u\n",
+					cp->resp_start, cp->resp_stop,
+					cp->max_resp_qlen, cp->max_reorder_cnt);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   max queue legnth, no avail          : %u %u\n",
+					cp->max_qlen, cp->no_avail);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"   work queue                          : %u %u %u\n",
+					cp->queue_work_eng3,
+					cp->queue_work_not_eng3,
+					cp->queue_work_not_eng3_nz);
+	len += scnprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			"\n");
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req max %d          : %llu\n",
+			pe->unit,
+			pe->max_req_used,
+			pe->total_req
+		);
+		len += scnprintf(
+			_debug_read_buf + len,
+			DEBUG_MAX_RW_BUF - len - 1,
+			"   Engine %4d Req Error               : %llu\n",
+			pe->unit,
+			pe->err_req
+		);
+		qce_get_driver_stats(pe->qce);
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	for (i = 0; i < MAX_SMP_CPU+1; i++)
+		if (cp->cpu_req[i])
+			len += scnprintf(
+				_debug_read_buf + len,
+				DEBUG_MAX_RW_BUF - len - 1,
+				"CPU %d Issue Req                     : %d\n",
+				i, cp->cpu_req[i]);
+	return len;
+}
+
+static void _qcrypto_remove_engine(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp;
+	struct qcrypto_alg *q_alg;
+	struct qcrypto_alg *n;
+	unsigned long flags;
+	struct crypto_engine *pe;
+
+	cp = pengine->pcp;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	list_del(&pengine->elist);
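+	/*
+	 * If the engine being removed held the "first engine" role, hand
+	 * that role to the next engine remaining on the list, if any.
+	 */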
+	if (pengine->first_engine) {
+		cp->first_engine = NULL;
+		pe = list_first_entry_or_null(&cp->engine_list,
+					struct crypto_engine, elist);
+		if (pe) {
+			pe->first_engine = true;
+			cp->first_engine = pe;
+		}
+	}
+	if (cp->next_engine == pengine)
+		cp->next_engine = NULL;
+	if (cp->scheduled_eng == pengine)
+		cp->scheduled_eng = NULL;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	cp->total_units--;
+
+	cancel_work_sync(&pengine->bw_reaper_ws);
+	cancel_work_sync(&pengine->bw_allocate_ws);
+	del_timer_sync(&pengine->bw_reaper_timer);
+
+	if (pengine->bus_scale_handle != 0)
+		msm_bus_scale_unregister_client(pengine->bus_scale_handle);
+	pengine->bus_scale_handle = 0;
+
+	kzfree(pengine->preq_pool);
+
+	if (cp->total_units)
+		return;
+
+	list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) {
+		if (q_alg->alg_type == QCRYPTO_ALG_CIPHER)
+			crypto_unregister_alg(&q_alg->cipher_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_SHA)
+			crypto_unregister_ahash(&q_alg->sha_alg);
+		if (q_alg->alg_type == QCRYPTO_ALG_AEAD)
+			crypto_unregister_aead(&q_alg->aead_alg);
+		list_del(&q_alg->entry);
+		kzfree(q_alg);
+	}
+}
+
+static int _qcrypto_remove(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return 0;
+	cp = pengine->pcp;
+	mutex_lock(&cp->engine_lock);
+	_qcrypto_remove_engine(pengine);
+	mutex_unlock(&cp->engine_lock);
+	if (pengine->qce)
+		qce_close(pengine->qce);
+	kzfree(pengine);
+	return 0;
+}
+
+static int _qcrypto_check_aes_keylen(struct crypto_ablkcipher *cipher,
+		struct crypto_priv *cp, unsigned int len)
+{
+	switch (len) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
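+		/* else fall through to reject the unsupported key length */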
+	default:
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_192_fallback(struct crypto_ablkcipher *cipher,
+		const u8 *key)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	ctx->enc_key_len = AES_KEYSIZE_192;
+	ctx->cipher_aes192_fb->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
+	ctx->cipher_aes192_fb->base.crt_flags |=
+			(cipher->base.crt_flags & CRYPTO_TFM_REQ_MASK);
+	ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb, key,
+			AES_KEYSIZE_192);
+	if (ret) {
+		tfm->crt_flags &= ~CRYPTO_TFM_RES_MASK;
+		tfm->crt_flags |=
+			(cipher->base.crt_flags & CRYPTO_TFM_RES_MASK);
+	}
+	return ret;
+}
+
+static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+
+	if ((len == AES_KEYSIZE_192) && (!cp->ce_support.aes_key_192)
+					&& ctx->cipher_aes192_fb)
+		return _qcrypto_setkey_aes_192_fallback(cipher, key);
+
+	if (_qcrypto_check_aes_keylen(cipher, cp, len))
+		return -EINVAL;
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))  {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_aes_xts(struct crypto_ablkcipher *cipher,
+		const u8 *key, unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY)
+		return 0;
+	if (_qcrypto_check_aes_keylen(cipher, cp, len/2))
+		return -EINVAL;
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))  {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	u32 tmp[DES_EXPKEY_WORDS];
+	int ret;
+
+	if (!key) {
+		pr_err("%s Invalid key pointer\n", __func__);
+		return -EINVAL;
+	}
+
+	ret = des_ekey(tmp, key);
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for DES algorithm\n",
+								__func__);
+		return 0;
+	}
+
+	if (len != DES_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) {
+		tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
+		return -EINVAL;
+	}
+
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY))
+		memcpy(ctx->enc_key, key, len);
+
+	return 0;
+}
+
+static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key,
+		unsigned int len)
+{
+	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	if ((ctx->flags & QCRYPTO_CTX_USE_HW_KEY) == QCRYPTO_CTX_USE_HW_KEY) {
+		pr_err("%s HW KEY usage not supported for 3DES algorithm\n",
+								__func__);
+		return 0;
+	}
+	if (len != DES3_EDE_KEY_SIZE) {
+		crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = len;
+	if (!(ctx->flags & QCRYPTO_CTX_USE_PIPE_KEY)) {
+		if (key != NULL) {
+			memcpy(ctx->enc_key, key, len);
+		} else {
+			pr_err("%s Invalid key pointer\n", __func__);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+static void seq_response(struct work_struct *work)
+{
+	struct crypto_priv *cp = container_of(work, struct crypto_priv,
+							 resp_work);
+	struct llist_node *list;
+	struct llist_node *rev = NULL;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	int total_unit;
+
+again:
+	list = llist_del_all(&cp->ordered_resp_list);
+
+	if (!list)
+		goto end;
+
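+	/*
+	 * llist_del_all() returns the nodes newest-first (llist_add()
+	 * pushes at the head), so reverse the chain in place to complete
+	 * requests in their submission order.
+	 */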
+	while (list) {
+		struct llist_node *t = list;
+
+		list = llist_next(list);
+		t->next = rev;
+		rev = t;
+	}
+
+	while (rev) {
+		struct qcrypto_resp_ctx *arsp;
+		struct crypto_async_request *areq;
+
+		arsp = container_of(rev, struct qcrypto_resp_ctx, llist);
+		rev = llist_next(rev);
+
+		areq = arsp->async_req;
+		local_bh_disable();
+		areq->complete(areq, arsp->res);
+		local_bh_enable();
+		atomic_dec(&cp->resp_cnt);
+	}
+
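+	/*
+	 * Once the completion backlog drains below the low-water mark,
+	 * flip the processing state back to IN_PROGRESS and kick every
+	 * idle engine so request dequeuing resumes.
+	 */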
+	if (atomic_read(&cp->resp_cnt) < COMPLETION_CB_BACKLOG_LENGTH_START &&
+		(cmpxchg(&cp->ce_req_proc_sts, STOPPED, IN_PROGRESS)
+						== STOPPED)) {
+		cp->resp_start++;
+		for (total_unit = cp->total_units; total_unit-- > 0;) {
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine = _avail_eng(cp);
+			spin_unlock_irqrestore(&cp->lock, flags);
+			if (pengine)
+				_start_qcrypto_process(cp, pengine);
+			else
+				break;
+		}
+	}
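+	/*
+	 * Re-arm protocol: a producer that raced with this work instance
+	 * leaves SCHEDULE_AGAIN behind; consume it (flipping back to
+	 * IS_SCHEDULED) and loop so no queued response is left behind.
+	 */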
+end:
+	if (cmpxchg(&cp->sched_resp_workq_status, SCHEDULE_AGAIN,
+				IS_SCHEDULED) == SCHEDULE_AGAIN)
+		goto again;
+	else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+				NOT_SCHEDULED) == SCHEDULE_AGAIN)
+		goto end;
+}
+
+#define SCHEDULE_RSP_QLEN_THRESHOLD 64
+
+static void _qcrypto_tfm_complete(struct crypto_engine *pengine, u32 type,
+					void *tfm_ctx,
+					struct qcrypto_resp_ctx *cur_arsp,
+					int res)
+{
+	struct crypto_priv *cp = pengine->pcp;
+	unsigned long flags;
+	struct qcrypto_resp_ctx *arsp;
+	struct list_head *plist;
+	unsigned int resp_qlen;
+	unsigned int cnt = 0;
+
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		plist = &((struct qcrypto_sha_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		plist = &((struct qcrypto_cipher_ctx *) tfm_ctx)->rsp_queue;
+		break;
+	}
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	cur_arsp->res = res;
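+	/*
+	 * Completions must be delivered in submission order per tfm:
+	 * drain every leading entry that has finished onto the ordered
+	 * response list, stopping at the first one still -EINPROGRESS.
+	 */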
+	while (!list_empty(plist)) {
+		arsp = list_first_entry(plist,
+				struct qcrypto_resp_ctx, list);
+		if (arsp->res == -EINPROGRESS)
+			break;
+		list_del(&arsp->list);
+		llist_add(&arsp->llist, &cp->ordered_resp_list);
+		atomic_inc(&cp->resp_cnt);
+		cnt++;
+	}
+	resp_qlen = atomic_read(&cp->resp_cnt);
+	if (resp_qlen > cp->max_resp_qlen)
+		cp->max_resp_qlen = resp_qlen;
+	if (cnt > cp->max_reorder_cnt)
+		cp->max_reorder_cnt = cnt;
+	if ((resp_qlen >= COMPLETION_CB_BACKLOG_LENGTH_STOP) &&
+		cmpxchg(&cp->ce_req_proc_sts, IN_PROGRESS,
+						STOPPED) == IN_PROGRESS) {
+		cp->resp_stop++;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+retry:
+	if (!llist_empty(&cp->ordered_resp_list)) {
+		unsigned int cpu;
+
+		if (pengine->first_engine) {
+			cpu = WORK_CPU_UNBOUND;
+			cp->queue_work_eng3++;
+		} else {
+			cp->queue_work_not_eng3++;
+			cpu = cp->cpu_getting_irqs_frm_first_ce;
+			/*
+			 * If this completion did not come from the first
+			 * engine and the first engine still has requests
+			 * in flight, skip scheduling the work queue in
+			 * anticipation of more completions arriving. If
+			 * the response queue length exceeds the threshold,
+			 * schedule immediately to avoid further delay.
+			 */
+			if (cp->first_engine && atomic_read(
+						&cp->first_engine->req_count)) {
+				if (resp_qlen < SCHEDULE_RSP_QLEN_THRESHOLD)
+					return;
+				cp->queue_work_not_eng3_nz++;
+			}
+		}
+		if (cmpxchg(&cp->sched_resp_workq_status, NOT_SCHEDULED,
+					IS_SCHEDULED) == NOT_SCHEDULED)
+			queue_work_on(cpu, cp->resp_wq, &cp->resp_work);
+		else if (cmpxchg(&cp->sched_resp_workq_status, IS_SCHEDULED,
+					SCHEDULE_AGAIN) == NOT_SCHEDULED)
+			goto retry;
+	}
+}
+
+static void req_done(struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_engine *pengine;
+	struct crypto_async_request *areq;
+	struct crypto_priv *cp;
+	struct qcrypto_resp_ctx *arsp;
+	u32 type = 0;
+	void *tfm_ctx = NULL;
+	unsigned int cpu;
+	int res;
+
+	pengine = pqcrypto_req_control->pce;
+	cp = pengine->pcp;
+	areq = pqcrypto_req_control->req;
+	arsp = pqcrypto_req_control->arsp;
+	res = pqcrypto_req_control->res;
+	qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+	if (areq) {
+		type = crypto_tfm_alg_type(areq->tfm);
+		tfm_ctx = crypto_tfm_ctx(areq->tfm);
+	}
+	cpu = smp_processor_id();
+	pengine->irq_cpu = cpu;
+	if (pengine->first_engine) {
+		if (cpu  != cp->cpu_getting_irqs_frm_first_ce)
+			cp->cpu_getting_irqs_frm_first_ce = cpu;
+	}
+	if (areq)
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, res);
+	if (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS)
+		_start_qcrypto_process(cp, pengine);
+}
+
+static void _qce_ahash_complete(void *cookie, unsigned char *digest,
+		unsigned char *authdata, int ret)
+{
+	struct ahash_request *areq = (struct ahash_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	uint32_t diglen = crypto_ahash_digestsize(ahash);
+	uint32_t *auth32 = (uint32_t *)authdata;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "_qce_ahash_complete: %p ret %d\n",
+				areq, ret);
+#endif
+	if (digest) {
+		memcpy(rctx->digest, digest, diglen);
+		if (rctx->last_blk)
+			memcpy(areq->result, digest, diglen);
+	}
+	if (authdata) {
+		rctx->byte_count[0] = auth32[0];
+		rctx->byte_count[1] = auth32[1];
+		rctx->byte_count[2] = auth32[2];
+		rctx->byte_count[3] = auth32[3];
+	}
+	areq->src = rctx->src;
+	areq->nbytes = rctx->nbytes;
+
+	rctx->last_blk = 0;
+	rctx->first_blk = 0;
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->ahash_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->ahash_op_success++;
+	}
+	if (cp->ce_support.aligned_only)  {
+		areq->src = rctx->orig_src;
+		kfree(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb,
+		unsigned char *iv, int ret)
+{
+	struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = ablkcipher_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&pengine->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n",
+				areq, ret);
+#endif
+	if (iv)
+		memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk));
+
+	if (ret) {
+		pqcrypto_req_control->res = -ENXIO;
+		pstat->ablk_cipher_op_fail++;
+	} else {
+		pqcrypto_req_control->res = 0;
+		pstat->ablk_cipher_op_success++;
+	}
+
+	if (cp->ce_support.aligned_only) {
+		uint32_t num_sg = 0;
+		uint32_t bytes = 0;
+
+		areq->src = rctx->orig_src;
+		areq->dst = rctx->orig_dst;
+
+		num_sg = qcrypto_count_sg(areq->dst, areq->nbytes);
+		bytes = qcrypto_sg_copy_from_buffer(areq->dst, num_sg,
+			rctx->data, areq->nbytes);
+		if (bytes != areq->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy=0x%x\n",
+						bytes, areq->nbytes);
+		kzfree(rctx->data);
+	}
+	req_done(pqcrypto_req_control);
+}
+
+static void _qce_aead_complete(void *cookie, unsigned char *icv,
+				unsigned char *iv, int ret)
+{
+	struct aead_request *areq = (struct aead_request *) cookie;
+	struct crypto_async_request *async_req;
+	struct crypto_aead *aead = crypto_aead_reqtfm(areq);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm);
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct crypto_stat *pstat;
+	struct crypto_engine *pengine;
+	struct qcrypto_req_control *pqcrypto_req_control;
+
+	async_req = &areq->base;
+	pstat = &_qcrypto_stat;
+	rctx = aead_request_ctx(areq);
+	pengine = rctx->pengine;
+	pqcrypto_req_control = find_req_control_for_areq(pengine,
+							 async_req);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("async request not found\n");
+		return;
+	}
+
+	if (rctx->mode == QCE_MODE_CCM) {
+		kzfree(rctx->adata);
+	} else {
+		uint32_t ivsize = crypto_aead_ivsize(aead);
+
+		if (ret == 0) {
+			if (rctx->dir  == QCE_ENCRYPT) {
+				/* copy the icv to dst */
+				scatterwalk_map_and_copy(icv, areq->dst,
+						areq->cryptlen + areq->assoclen,
+						ctx->authsize, 1);
+
+			} else {
+				unsigned char tmp[SHA256_DIGESTSIZE] = {0};
+
+				/* compare icv from src */
+				scatterwalk_map_and_copy(tmp,
+					areq->src, areq->assoclen +
+					areq->cryptlen - ctx->authsize,
+					ctx->authsize, 0);
+				ret = memcmp(icv, tmp, ctx->authsize);
+				if (ret != 0)
+					ret = -EBADMSG;
+
+			}
+		} else {
+			ret = -ENXIO;
+		}
+
+		if (iv)
+			memcpy(ctx->iv, iv, ivsize);
+	}
+
+	if (ret == (-EBADMSG))
+		pstat->aead_bad_msg++;
+	else if (ret)
+		pstat->aead_op_fail++;
+	else
+		pstat->aead_op_success++;
+
+	pqcrypto_req_control->res = ret;
+	req_done(pqcrypto_req_control);
+}
+
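+/*
+ * Encode msglen big-endian into the trailing csize bytes of the CCM B0
+ * block's length field; the leading bytes stay zero from the memset.
+ * For example, msglen = 0x1234 with csize = 4 produces 00 00 12 34.
+ * A length that does not fit in a csize-byte field is rejected with
+ * -EOVERFLOW.
+ */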
+static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize)
+{
+	__be32 data;
+
+	memset(block, 0, csize);
+	block += csize;
+
+	if (csize >= 4)
+		csize = 4;
+	else if (msglen > (1 << (8 * csize)))
+		return -EOVERFLOW;
+
+	data = cpu_to_be32(msglen);
+	memcpy(block - csize, (u8 *)&data + 4 - csize, csize);
+
+	return 0;
+}
+
+static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq)
+{
+	struct aead_request *areq = (struct aead_request *) qreq->areq;
+	unsigned int i = ((unsigned int)qreq->iv[0]) + 1;
+
+	memcpy(&qreq->nonce[0], qreq->iv, qreq->ivsize);
+	/*
+	 * Format control info per RFC 3610 and
+	 * NIST Special Publication 800-38C
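+	 *
+	 * B0 flags byte, as RFC 3610 section 2.2 lays it out:
+	 *   bit  6    Adata      - set when there is associated data
+	 *   bits 5..3 M' = (authsize - 2) / 2
+	 *   bits 2..0 L' = L - 1 - already carried in iv[0]
+	 * e.g. authsize 8, assoclen != 0, L = 4: 0x40 | 0x18 | 0x03 = 0x5b.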
+	 */
+	qreq->nonce[0] |= (8 * ((qreq->authsize - 2) / 2));
+	if (areq->assoclen)
+		qreq->nonce[0] |= 64;
+
+	if (i > MAX_NONCE)
+		return -EINVAL;
+
+	return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i);
+}
+
+static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen,
+				struct scatterlist *sg, unsigned char *adata)
+{
+	uint32_t len;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	/*
+	 * Add control info for associated data
+	 * RFC 3610 and NIST Special Publication 800-38C
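+	 *
+	 * The associated data length is encoded per RFC 3610 (summarized
+	 * here for reference):
+	 *   0 < alen < 0xff00      -> 2-byte big-endian length
+	 *   0xff00 <= alen < 2^32  -> 0xfffe || 4-byte big-endian length
+	 *   2^32 <= alen < 2^64    -> 0xffff || 8-byte big-endian length
+	 * e.g. alen = 24 becomes the two bytes 0x00 0x18.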
+	 */
+	if (alen < 65280) {
+		*(__be16 *)adata = cpu_to_be16(alen);
+		len = 2;
+	} else if (alen <= 0xffffffff) {
+		*(__be16 *)adata = cpu_to_be16(0xfffe);
+		*(__be32 *)&adata[2] = cpu_to_be32(alen);
+		len = 6;
+	} else {
+		*(__be16 *)adata = cpu_to_be16(0xffff);
+		*(__be32 *)&adata[6] = cpu_to_be32(alen);
+		len = 10;
+	}
+	adata += len;
+	qreq->assoclen = ALIGN((alen + len), 16);
+
+	num_sg = qcrypto_count_sg(sg, alen);
+	bytes = qcrypto_sg_copy_to_buffer(sg, num_sg, adata, alen);
+	if (bytes != alen)
+		pr_warn("bytes copied=0x%x bytes to copy=0x%x\n", bytes, alen);
+
+	return 0;
+}
+
+static int _qcrypto_process_ablkcipher(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct ablkcipher_request *req;
+	struct crypto_ablkcipher *tfm;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct ablkcipher_request, base);
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->pengine = pengine;
+	tfm = crypto_ablkcipher_reqtfm(req);
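+	/*
+	 * Engines that require aligned buffers get a single contiguous
+	 * bounce buffer: gather src into it here; the completion callback
+	 * scatters the result back to the original dst.
+	 */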
+	if (pengine->pcp->ce_support.aligned_only) {
+		uint32_t bytes = 0;
+		uint32_t num_sg = 0;
+
+		rctx->orig_src = req->src;
+		rctx->orig_dst = req->dst;
+		rctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+		if (rctx->data == NULL)
+			return -ENOMEM;
+		num_sg = qcrypto_count_sg(req->src, req->nbytes);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, rctx->data,
+								req->nbytes);
+		if (bytes != req->nbytes)
+			pr_warn("bytes copied=0x%x bytes to copy=0x%x\n",
+						bytes, req->nbytes);
+		sg_set_buf(&rctx->dsg, rctx->data, req->nbytes);
+		sg_mark_end(&rctx->dsg);
+		rctx->iv = req->info;
+
+		req->src = &rctx->dsg;
+		req->dst = &rctx->dsg;
+	}
+	qreq.op = QCE_REQ_ABLK_CIPHER;
+	qreq.qce_cb = _qce_ablk_cipher_complete;
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.iv = req->info;
+	qreq.ivsize = crypto_ablkcipher_ivsize(tfm);
+	qreq.cryptlen = req->nbytes;
+	qreq.use_pmem = 0;
+	qreq.flags = cipher_ctx->flags;
+
+	if ((cipher_ctx->enc_key_len == 0) &&
+			(pengine->pcp->platform_support.hw_key_support == 0))
+		ret = -EINVAL;
+	else
+		ret =  qce_ablk_cipher_req(pengine->qce, &qreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_ahash(struct crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct ahash_request *req;
+	struct qce_sha_req sreq;
+	struct qcrypto_sha_req_ctx *rctx;
+	struct qcrypto_sha_ctx *sha_ctx;
+	int ret = 0;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req,
+				struct ahash_request, base);
+	rctx = ahash_request_ctx(req);
+	sha_ctx = crypto_tfm_ctx(async_req->tfm);
+	rctx->pengine = pengine;
+
+	sreq.qce_cb = _qce_ahash_complete;
+	sreq.digest =  &rctx->digest[0];
+	sreq.src = req->src;
+	sreq.auth_data[0] = rctx->byte_count[0];
+	sreq.auth_data[1] = rctx->byte_count[1];
+	sreq.auth_data[2] = rctx->byte_count[2];
+	sreq.auth_data[3] = rctx->byte_count[3];
+	sreq.first_blk = rctx->first_blk;
+	sreq.last_blk = rctx->last_blk;
+	sreq.size = req->nbytes;
+	sreq.areq = req;
+	sreq.flags = sha_ctx->flags;
+
+	switch (sha_ctx->alg) {
+	case QCE_HASH_SHA1:
+		sreq.alg = QCE_HASH_SHA1;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA256:
+		sreq.alg = QCE_HASH_SHA256;
+		sreq.authkey = NULL;
+		break;
+	case QCE_HASH_SHA1_HMAC:
+		sreq.alg = QCE_HASH_SHA1_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	case QCE_HASH_SHA256_HMAC:
+		sreq.alg = QCE_HASH_SHA256_HMAC;
+		sreq.authkey = &sha_ctx->authkey[0];
+		sreq.authklen = SHA_HMAC_KEY_SIZE;
+		break;
+	default:
+		pr_err("Algorithm %d not supported, exiting\n", sha_ctx->alg);
+		return -EINVAL;
+	}
+	ret = qce_process_sha_req(pengine->qce, &sreq);
+
+	return ret;
+}
+
+static int _qcrypto_process_aead(struct  crypto_engine *pengine,
+			struct qcrypto_req_control *pqcrypto_req_control)
+{
+	struct crypto_async_request *async_req;
+	struct qce_req qreq;
+	int ret = 0;
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *cipher_ctx;
+	struct aead_request *req;
+	struct crypto_aead *aead;
+
+	async_req = pqcrypto_req_control->req;
+	req = container_of(async_req, struct aead_request, base);
+	aead = crypto_aead_reqtfm(req);
+	rctx = aead_request_ctx(req);
+	rctx->pengine = pengine;
+	cipher_ctx = crypto_tfm_ctx(async_req->tfm);
+
+	qreq.op = QCE_REQ_AEAD;
+	qreq.qce_cb = _qce_aead_complete;
+
+	qreq.areq = req;
+	qreq.alg = rctx->alg;
+	qreq.dir = rctx->dir;
+	qreq.mode = rctx->mode;
+	qreq.iv = rctx->iv;
+
+	qreq.enckey = cipher_ctx->enc_key;
+	qreq.encklen = cipher_ctx->enc_key_len;
+	qreq.authkey = cipher_ctx->auth_key;
+	qreq.authklen = cipher_ctx->auth_key_len;
+	qreq.authsize = crypto_aead_authsize(aead);
+	qreq.auth_alg = cipher_ctx->auth_alg;
+	if (qreq.mode == QCE_MODE_CCM)
+		qreq.ivsize =  AES_BLOCK_SIZE;
+	else
+		qreq.ivsize =  crypto_aead_ivsize(aead);
+	qreq.flags = cipher_ctx->flags;
+
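+	/*
+	 * For CCM, a decrypt request's cryptlen includes the trailing
+	 * ICV, which is not payload; strip it before handing off to the
+	 * engine.
+	 */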
+	if (qreq.mode == QCE_MODE_CCM) {
+		if (qreq.dir == QCE_ENCRYPT)
+			qreq.cryptlen = req->cryptlen;
+		else
+			qreq.cryptlen = req->cryptlen -
+						qreq.authsize;
+		/* Get NONCE */
+		ret = qccrypto_set_aead_ccm_nonce(&qreq);
+		if (ret)
+			return ret;
+
+		if (req->assoclen) {
+			rctx->adata = kzalloc((req->assoclen + 0x64),
+								GFP_ATOMIC);
+			if (!rctx->adata)
+				return -ENOMEM;
+			/* Format Associated data    */
+			ret = qcrypto_aead_ccm_format_adata(&qreq,
+						req->assoclen,
+						req->src,
+						rctx->adata);
+		} else {
+			qreq.assoclen = 0;
+			rctx->adata = NULL;
+		}
+		if (ret) {
+			kzfree(rctx->adata);
+			return ret;
+		}
+
+		/*
+		 * update req with new formatted associated
+		 * data info
+		 */
+		qreq.asg = &rctx->asg;
+		if (rctx->adata)
+			sg_set_buf(qreq.asg, rctx->adata,
+					qreq.assoclen);
+		sg_mark_end(qreq.asg);
+	}
+	ret =  qce_aead_req(pengine->qce, &qreq);
+
+	return ret;
+}
+
+static struct crypto_engine *_qcrypto_static_assign_engine(
+					struct crypto_priv *cp)
+{
+	struct crypto_engine *pengine;
+	unsigned long flags;
+
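+	/* hand engines out round-robin; cp->lock serializes the cursor */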
+	spin_lock_irqsave(&cp->lock, flags);
+	if (cp->next_engine)
+		pengine = cp->next_engine;
+	else
+		pengine = list_first_entry(&cp->engine_list,
+				struct crypto_engine, elist);
+
+	if (list_is_last(&pengine->elist, &cp->engine_list))
+		cp->next_engine = list_first_entry(
+			&cp->engine_list, struct crypto_engine, elist);
+	else
+		cp->next_engine = list_next_entry(pengine, elist);
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return pengine;
+}
+
+static int _start_qcrypto_process(struct crypto_priv *cp,
+				struct crypto_engine *pengine)
+{
+	struct crypto_async_request *async_req = NULL;
+	struct crypto_async_request *backlog_eng = NULL;
+	struct crypto_async_request *backlog_cp = NULL;
+	unsigned long flags;
+	u32 type;
+	int ret = 0;
+	struct crypto_stat *pstat;
+	void *tfm_ctx;
+	struct qcrypto_cipher_req_ctx *cipher_rctx;
+	struct qcrypto_sha_req_ctx *ahash_rctx;
+	struct ablkcipher_request *ablkcipher_req;
+	struct ahash_request *ahash_req;
+	struct aead_request *aead_req;
+	struct qcrypto_resp_ctx *arsp;
+	struct qcrypto_req_control *pqcrypto_req_control;
+	unsigned int cpu = MAX_SMP_CPU;
+
+	if (READ_ONCE(cp->ce_req_proc_sts) == STOPPED)
+		return 0;
+
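+	/*
+	 * Bucket the issuing CPU for the debugfs stats; index MAX_SMP_CPU
+	 * counts requests issued outside interrupt context.
+	 */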
+	if (in_interrupt()) {
+		cpu = smp_processor_id();
+		if (cpu >= MAX_SMP_CPU)
+			cpu = MAX_SMP_CPU - 1;
+	}
+
+	pstat = &_qcrypto_stat;
+
+again:
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->issue_req ||
+		atomic_read(&pengine->req_count) >= (pengine->max_req)) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	backlog_eng = crypto_get_backlog(&pengine->req_queue);
+
+	/* make sure it is in high bandwidth state */
+	if (pengine->bw_state != BUS_HAS_BANDWIDTH) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* try to get request from request queue of the engine first */
+	async_req = crypto_dequeue_request(&pengine->req_queue);
+	if (!async_req) {
+		/*
+		 * if no request from the engine,
+		 * try to  get from request queue of driver
+		 */
+		backlog_cp = crypto_get_backlog(&cp->req_queue);
+		async_req = crypto_dequeue_request(&cp->req_queue);
+		if (!async_req) {
+			spin_unlock_irqrestore(&cp->lock, flags);
+			return 0;
+		}
+	}
+	pqcrypto_req_control = qcrypto_alloc_req_control(pengine);
+	if (pqcrypto_req_control == NULL) {
+		pr_err("Allocation of request failed\n");
+		spin_unlock_irqrestore(&cp->lock, flags);
+		return 0;
+	}
+
+	/* add associated rsp entry to tfm response queue */
+	type = crypto_tfm_alg_type(async_req->tfm);
+	tfm_ctx = crypto_tfm_ctx(async_req->tfm);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_AHASH:
+		ahash_req = container_of(async_req,
+			struct ahash_request, base);
+		ahash_rctx = ahash_request_ctx(ahash_req);
+		arsp = &ahash_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_sha_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ablkcipher_req = container_of(async_req,
+			struct ablkcipher_request, base);
+		cipher_rctx = ablkcipher_request_ctx(ablkcipher_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+	default:
+		aead_req = container_of(async_req,
+			struct aead_request, base);
+		cipher_rctx = aead_request_ctx(aead_req);
+		arsp = &cipher_rctx->rsp_entry;
+		list_add_tail(
+			&arsp->list,
+			&((struct qcrypto_cipher_ctx *)tfm_ctx)
+				->rsp_queue);
+		break;
+	}
+
+	arsp->res = -EINPROGRESS;
+	arsp->async_req = async_req;
+	pqcrypto_req_control->pce = pengine;
+	pqcrypto_req_control->req = async_req;
+	pqcrypto_req_control->arsp = arsp;
+	pengine->active_seq++;
+	pengine->check_flag = true;
+
+	pengine->issue_req = true;
+	cp->cpu_req[cpu]++;
+	smp_mb(); /* make it visible */
+
+	spin_unlock_irqrestore(&cp->lock, flags);
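+	/* notify any backlogged requests that they are now in flight */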
+	if (backlog_eng)
+		backlog_eng->complete(backlog_eng, -EINPROGRESS);
+	if (backlog_cp)
+		backlog_cp->complete(backlog_cp, -EINPROGRESS);
+	switch (type) {
+	case CRYPTO_ALG_TYPE_ABLKCIPHER:
+		ret = _qcrypto_process_ablkcipher(pengine,
+					pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AHASH:
+		ret = _qcrypto_process_ahash(pengine, pqcrypto_req_control);
+		break;
+	case CRYPTO_ALG_TYPE_AEAD:
+		ret = _qcrypto_process_aead(pengine, pqcrypto_req_control);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	pengine->issue_req = false;
+	smp_mb(); /* make it visible */
+
+	pengine->total_req++;
+	if (ret) {
+		pengine->err_req++;
+		qcrypto_free_req_control(pengine, pqcrypto_req_control);
+
+		if (type == CRYPTO_ALG_TYPE_ABLKCIPHER)
+			pstat->ablk_cipher_op_fail++;
+		else if (type == CRYPTO_ALG_TYPE_AHASH)
+			pstat->ahash_op_fail++;
+		else
+			pstat->aead_op_fail++;
+
+		_qcrypto_tfm_complete(pengine, type, tfm_ctx, arsp, ret);
+		goto again;
+	}
+	return ret;
+}
+
+static inline struct crypto_engine *_next_eng(struct crypto_priv *cp,
+		struct crypto_engine *p)
+{
+	if (p == NULL || list_is_last(&p->elist, &cp->engine_list))
+		p =  list_first_entry(&cp->engine_list, struct crypto_engine,
+			elist);
+	else
+		p = list_entry(p->elist.next, struct crypto_engine, elist);
+	return p;
+}
+
+static struct crypto_engine *_avail_eng(struct crypto_priv *cp)
+{
+	/* caller must hold cp->lock */
+	struct crypto_engine *q = NULL;
+	struct crypto_engine *p = cp->scheduled_eng;
+	struct crypto_engine *q1;
+	int eng_cnt = cp->total_units;
+
+	if (unlikely(list_empty(&cp->engine_list))) {
+		pr_err("%s: no valid ce to schedule\n", __func__);
+		return NULL;
+	}
+
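+	/*
+	 * Round-robin scan starting after the engine scheduled last time:
+	 * pick the first engine that is neither mid-issue nor already at
+	 * its max_req in-flight limit.
+	 */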
+	p = _next_eng(cp, p);
+	q1 = p;
+	while (eng_cnt-- > 0) {
+		if (!p->issue_req && atomic_read(&p->req_count) < p->max_req) {
+			q = p;
+			break;
+		}
+		p = _next_eng(cp, p);
+		if (q1 == p)
+			break;
+	}
+	cp->scheduled_eng = q;
+	return q;
+}
+
+static int _qcrypto_queue_req(struct crypto_priv *cp,
+				struct crypto_engine *pengine,
+				struct crypto_async_request *req)
+{
+	int ret;
+	unsigned long flags;
+
+	spin_lock_irqsave(&cp->lock, flags);
+
+	if (pengine) {
+		ret = crypto_enqueue_request(&pengine->req_queue, req);
+	} else {
+		ret = crypto_enqueue_request(&cp->req_queue, req);
+		pengine = _avail_eng(cp);
+		if (cp->req_queue.qlen > cp->max_qlen)
+			cp->max_qlen = cp->req_queue.qlen;
+	}
+	if (pengine) {
+		switch (pengine->bw_state) {
+		case BUS_NO_BANDWIDTH:
+			if (pengine->high_bw_req == false) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+			pengine = NULL;
+			break;
+		case BUS_HAS_BANDWIDTH:
+			break;
+		case BUS_BANDWIDTH_RELEASING:
+			pengine->high_bw_req = true;
+			pengine = NULL;
+			break;
+		case BUS_BANDWIDTH_ALLOCATING:
+			pengine = NULL;
+			break;
+		case BUS_SUSPENDED:
+		case BUS_SUSPENDING:
+		default:
+			pengine = NULL;
+			break;
+		}
+	} else {
+		cp->no_avail++;
+	}
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (pengine && (READ_ONCE(cp->ce_req_proc_sts) == IN_PROGRESS))
+		_start_qcrypto_process(cp, pengine);
+	return ret;
+}
+
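+/*
+ * Some crypto engines cannot take 192-bit AES keys; these helpers route
+ * the request through the software skcipher allocated when the tfm was
+ * initialized.
+ */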
+static int _qcrypto_enc_aes_192_fallback(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int err;
+
+	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_callback(subreq, req->base.flags,
+					NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+					req->nbytes, req->info);
+	err = crypto_skcipher_encrypt(subreq);
+	skcipher_request_zero(subreq);
+	return err;
+}
+
+static int _qcrypto_dec_aes_192_fallback(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	int err;
+
+	SKCIPHER_REQUEST_ON_STACK(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_tfm(subreq, ctx->cipher_aes192_fb);
+	skcipher_request_set_callback(subreq, req->base.flags,
+					NULL, NULL);
+	skcipher_request_set_crypt(subreq, req->src, req->dst,
+					req->nbytes, req->info);
+	err = crypto_skcipher_decrypt(subreq);
+	skcipher_request_zero(subreq);
+	return err;
+}
+
+
+static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_enc_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CTR;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_XTS;
+
+	pstat->ablk_cipher_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_enc_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L -1 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+				CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req);
+#endif
+
+	if ((ctx->enc_key_len == AES_KEYSIZE_192) &&
+			(!cp->ce_support.aes_key_192) &&
+				ctx->cipher_aes192_fb)
+		return _qcrypto_dec_aes_192_fallback(req);
+
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_CTR;
+
+	/* CTR is symmetric: decryption applies the same keystream XOR,
+	 * so issue the request to the engine as an encrypt operation.
+	 */
+	rctx->dir = QCE_ENCRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_ECB;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+
+	pstat->ablk_cipher_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	WARN_ON(crypto_tfm_alg_type(req->base.tfm) !=
+					CRYPTO_ALG_TYPE_ABLKCIPHER);
+	rctx = ablkcipher_request_ctx(req);
+	rctx->aead = 0;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->mode = QCE_MODE_XTS;
+	rctx->dir = QCE_DECRYPT;
+
+	pstat->ablk_cipher_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1))
+		return  -EINVAL;
+	if ((ctx->auth_key_len != AES_KEYSIZE_128) &&
+		(ctx->auth_key_len != AES_KEYSIZE_256))
+		return  -EINVAL;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
+	rctx->iv = req->iv;
+
+	pstat->aead_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_rfc4309_dec_aes_ccm(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CCM;
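+	/*
+	 * Build the RFC 4309 CCM nonce into a 16-byte counter block:
+	 * byte 0 is the flags octet (L - 1 = 3, i.e. a 4-byte length
+	 * field), bytes 1-3 are the salt taken from the tail of the
+	 * key, bytes 4-11 are the 8-byte per-request IV, and the
+	 * remaining bytes stay zero for the block counter.
+	 */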
+	memset(rctx->rfc4309_iv, 0, sizeof(rctx->rfc4309_iv));
+	rctx->rfc4309_iv[0] = 3; /* L - 1 */
+	memcpy(&rctx->rfc4309_iv[1], ctx->ccm4309_nonce, 3);
+	memcpy(&rctx->rfc4309_iv[4], req->iv, 8);
+	rctx->iv = rctx->rfc4309_iv;
+	pstat->aead_rfc4309_ccm_aes_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc,
+				unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	ctx->authsize = authsize;
+	return 0;
+}
+
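+/* CCM permits only even ICV lengths between 4 and 16 bytes (RFC 3610) */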
+static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 4:
+	case 6:
+	case 8:
+	case 10:
+	case 12:
+	case 14:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setauthsize(struct crypto_aead *authenc,
+				  unsigned int authsize)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc);
+
+	switch (authsize) {
+	case 8:
+	case 12:
+	case 16:
+		break;
+	default:
+		return -EINVAL;
+	}
+	ctx->authsize = authsize;
+	return 0;
+}
+
+static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key,
+			unsigned int keylen)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm);
+	struct rtattr *rta = (struct rtattr *)key;
+	struct crypto_authenc_key_param *param;
+	int ret;
+
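+	/*
+	 * The authenc() key blob is rtattr-encoded: a
+	 * CRYPTO_AUTHENC_KEYA_PARAM attribute carrying the cipher key
+	 * length (big endian), followed by the authentication key and
+	 * then the cipher key itself.
+	 */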
+	if (!RTA_OK(rta, keylen))
+		goto badkey;
+	if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM)
+		goto badkey;
+	if (RTA_PAYLOAD(rta) < sizeof(*param))
+		goto badkey;
+
+	param = RTA_DATA(rta);
+	ctx->enc_key_len = be32_to_cpu(param->enckeylen);
+
+	key += RTA_ALIGN(rta->rta_len);
+	keylen -= RTA_ALIGN(rta->rta_len);
+
+	if (keylen < ctx->enc_key_len)
+		goto badkey;
+
+	ctx->auth_key_len = keylen - ctx->enc_key_len;
+	if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE ||
+				ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE)
+		goto badkey;
+	memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE);
+	memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len);
+	memcpy(ctx->auth_key, key, ctx->auth_key_len);
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+			ctx->ahash_aead_aes192_fb) {
+		crypto_ahash_clear_flags(ctx->ahash_aead_aes192_fb, ~0);
+		ret = crypto_ahash_setkey(ctx->ahash_aead_aes192_fb,
+					ctx->auth_key, ctx->auth_key_len);
+		if (ret)
+			goto badkey;
+		crypto_skcipher_clear_flags(ctx->cipher_aes192_fb, ~0);
+		ret = crypto_skcipher_setkey(ctx->cipher_aes192_fb,
+					ctx->enc_key, ctx->enc_key_len);
+		if (ret)
+			goto badkey;
+	}
+
+	return 0;
+badkey:
+	ctx->enc_key_len = 0;
+	crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+	return -EINVAL;
+}
+
+static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key,
+			unsigned int keylen)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	switch (keylen) {
+	case AES_KEYSIZE_128:
+	case AES_KEYSIZE_256:
+		break;
+	case AES_KEYSIZE_192:
+		if (cp->ce_support.aes_key_192)
+			break;
+		/* fall through - this engine cannot take 192-bit AES keys */
+	default:
+		ctx->enc_key_len = 0;
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+	ctx->enc_key_len = keylen;
+	memcpy(ctx->enc_key, key, keylen);
+	ctx->auth_key_len = keylen;
+	memcpy(ctx->auth_key, key, keylen);
+
+	return 0;
+}
+
+static int _qcrypto_aead_rfc4309_ccm_setkey(struct crypto_aead *aead,
+				 const u8 *key, unsigned int key_len)
+{
+	struct crypto_tfm *tfm = crypto_aead_tfm(aead);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm);
+	int ret;
+
+	if (key_len < QCRYPTO_CCM4309_NONCE_LEN)
+		return -EINVAL;
+	key_len -= QCRYPTO_CCM4309_NONCE_LEN;
+	memcpy(ctx->ccm4309_nonce, key + key_len,  QCRYPTO_CCM4309_NONCE_LEN);
+	ret = _qcrypto_aead_ccm_setkey(aead, key, key_len);
+	return ret;
+};
+
+static void _qcrypto_aead_aes_192_fb_a_cb(struct qcrypto_cipher_req_ctx *rctx,
+								int res)
+{
+	struct aead_request *req;
+	struct crypto_async_request *areq;
+
+	req = rctx->aead_req;
+	areq = &req->base;
+	if (rctx->fb_aes_req)
+		skcipher_request_free(rctx->fb_aes_req);
+	if (rctx->fb_hash_req)
+		ahash_request_free(rctx->fb_hash_req);
+	rctx->fb_aes_req = NULL;
+	rctx->fb_hash_req = NULL;
+	kfree(rctx->fb_aes_iv);
+	areq->complete(areq, res);
+}
+
+static void _aead_aes_fb_stage2_ahash_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+	/* copy icv */
+	if (err == 0)
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_hmac(struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct ahash_request *ahash_req;
+
+	ahash_req = rctx->fb_hash_req;
+	ahash_request_set_callback(ahash_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+				 _aead_aes_fb_stage2_ahash_complete, rctx);
+
+	return crypto_ahash_digest(ahash_req);
+}
+
+static void _aead_aes_fb_stage2_decrypt_complete(
+			struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+
+	rctx = base->data;
+	_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
+static int _start_aead_aes_fb_stage2_decrypt(
+					struct qcrypto_cipher_req_ctx *rctx)
+{
+	struct skcipher_request *aes_req;
+
+	aes_req = rctx->fb_aes_req;
+	skcipher_request_set_callback(aes_req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage2_decrypt_complete, rctx);
+	return crypto_skcipher_decrypt(aes_req);
+}
+
+static void _aead_aes_fb_stage1_ahash_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	/* compare icv */
+	if (err == 0) {
+		unsigned char tmp[ctx->authsize];
+
+		scatterwalk_map_and_copy(tmp, rctx->fb_aes_src,
+			req->cryptlen - ctx->authsize, ctx->authsize, 0);
+		if (memcmp(rctx->fb_ahash_digest, tmp, ctx->authsize) != 0)
+			err = -EBADMSG;
+	}
+	if (err)
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	else {
+		err = _start_aead_aes_fb_stage2_decrypt(rctx);
+		if (err != -EINPROGRESS &&  err != -EBUSY)
+			_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+	}
+}
+
+static void _aead_aes_fb_stage1_encrypt_complete(
+				struct crypto_async_request *base, int err)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct aead_request *req;
+	struct qcrypto_cipher_ctx *ctx;
+
+	rctx = base->data;
+	req = rctx->aead_req;
+	ctx = crypto_tfm_ctx(req->base.tfm);
+
+	memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+
+	if (err) {
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+		return;
+	}
+
+	err = _start_aead_aes_fb_stage2_hmac(rctx);
+
+	/* copy icv */
+	if (err == 0) {
+		scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					rctx->fb_aes_dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+	}
+	if (err != -EINPROGRESS &&  err != -EBUSY)
+		_qcrypto_aead_aes_192_fb_a_cb(rctx, err);
+}
+
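+/*
+ * AES-192 fallback for engines that lack 192-bit key support. The
+ * authenc operation runs in software in two asynchronous stages:
+ * encrypt runs the cipher first and then HMACs the result, while
+ * decrypt verifies the HMAC over the source first and only then runs
+ * the cipher.
+ */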
+static int _qcrypto_aead_aes_192_fallback(struct aead_request *req,
+							bool is_encrypt)
+{
+	int rc = -EINVAL;
+	struct qcrypto_cipher_req_ctx *rctx = aead_request_ctx(req);
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_aead *aead_tfm = crypto_aead_reqtfm(req);
+	struct skcipher_request *aes_req = NULL;
+	struct ahash_request *ahash_req = NULL;
+	int nbytes;
+	struct scatterlist *src, *dst;
+
+	rctx->fb_aes_iv = NULL;
+	aes_req = skcipher_request_alloc(ctx->cipher_aes192_fb, GFP_KERNEL);
+	if (!aes_req)
+		return -ENOMEM;
+	ahash_req = ahash_request_alloc(ctx->ahash_aead_aes192_fb, GFP_KERNEL);
+	if (!ahash_req)
+		goto ret;
+	rctx->fb_aes_req = aes_req;
+	rctx->fb_hash_req = ahash_req;
+	rctx->aead_req = req;
+	/*
+	 * assoc and iv sit at the beginning of both the src and dst
+	 * scatterlists; skip past them to reach the payload.
+	 */
+	src = scatterwalk_ffwd(rctx->fb_ablkcipher_src_sg, req->src,
+				req->assoclen);
+	dst = scatterwalk_ffwd(rctx->fb_ablkcipher_dst_sg, req->dst,
+				req->assoclen);
+
+	nbytes = req->cryptlen;
+	if (!is_encrypt)
+		nbytes -=  ctx->authsize;
+	rctx->fb_ahash_length = nbytes +  req->assoclen;
+	rctx->fb_aes_src = src;
+	rctx->fb_aes_dst = dst;
+	rctx->fb_aes_cryptlen = nbytes;
+	rctx->ivsize = crypto_aead_ivsize(aead_tfm);
+	rctx->fb_aes_iv = kzalloc(rctx->ivsize, GFP_ATOMIC);
+	if (!rctx->fb_aes_iv)
+		goto ret;
+	memcpy(rctx->fb_aes_iv, req->iv, rctx->ivsize);
+	skcipher_request_set_crypt(aes_req, rctx->fb_aes_src,
+					rctx->fb_aes_dst,
+					rctx->fb_aes_cryptlen, rctx->fb_aes_iv);
+	if (is_encrypt)
+		ahash_request_set_crypt(ahash_req, req->dst,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+	else
+		ahash_request_set_crypt(ahash_req, req->src,
+					rctx->fb_ahash_digest,
+					rctx->fb_ahash_length);
+
+	if (is_encrypt) {
+
+		skcipher_request_set_callback(aes_req,
+			CRYPTO_TFM_REQ_MAY_BACKLOG,
+			_aead_aes_fb_stage1_encrypt_complete, rctx);
+
+		rc = crypto_skcipher_encrypt(aes_req);
+		if (rc == 0) {
+			memcpy(ctx->iv, rctx->fb_aes_iv, rctx->ivsize);
+			rc = _start_aead_aes_fb_stage2_hmac(rctx);
+			if (rc == 0) {
+				/* copy icv */
+				scatterwalk_map_and_copy(rctx->fb_ahash_digest,
+					dst,
+					req->cryptlen,
+					ctx->authsize, 1);
+			}
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+
+	} else {
+		ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_aead_aes_fb_stage1_ahash_complete, rctx);
+
+		rc = crypto_ahash_digest(ahash_req);
+		if (rc == 0) {
+			unsigned char tmp[ctx->authsize];
+
+			/* compare icv */
+			scatterwalk_map_and_copy(tmp,
+				src, req->cryptlen - ctx->authsize,
+				ctx->authsize, 0);
+			if (memcmp(rctx->fb_ahash_digest, tmp,
+							ctx->authsize) != 0)
+				rc = -EBADMSG;
+			else
+				rc = _start_aead_aes_fb_stage2_decrypt(rctx);
+		}
+		if (rc == -EINPROGRESS || rc == -EBUSY)
+			return rc;
+		goto ret;
+	}
+ret:
+	if (aes_req)
+		skcipher_request_free(aes_req);
+	if (ahash_req)
+		ahash_request_free(ahash_req);
+	kfree(rctx->fb_aes_iv);
+	return rc;
+}
+
+static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev,
+			 "_qcrypto_aead_encrypt_aes_cbc: %p\n", req);
+#endif
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_enc++;
+	else
+		pstat->aead_sha256_aes_enc++;
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, true);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+#ifdef QCRYPTO_DEBUG
+	dev_info(&ctx->pengine->pdev->dev,
+			 "_qcrypto_aead_decrypt_aes_cbc: %p\n", req);
+#endif
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_AES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+	rctx->aead_req = req;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_aes_dec++;
+	else
+		pstat->aead_sha256_aes_dec++;
+
+	if (ctx->enc_key_len == AES_KEYSIZE_192 &&  ctx->cipher_aes192_fb &&
+						ctx->ahash_aead_aes192_fb)
+		return _qcrypto_aead_aes_192_fallback(req, false);
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_enc++;
+	else
+		pstat->aead_sha256_des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_des_dec++;
+	else
+		pstat->aead_sha256_des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_ENCRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_enc++;
+	else
+		pstat->aead_sha256_3des_enc++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req)
+{
+	struct qcrypto_cipher_req_ctx *rctx;
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_stat *pstat;
+
+	pstat = &_qcrypto_stat;
+
+	rctx = aead_request_ctx(req);
+	rctx->aead = 1;
+	rctx->alg = CIPHER_ALG_3DES;
+	rctx->dir = QCE_DECRYPT;
+	rctx->mode = QCE_MODE_CBC;
+	rctx->iv = req->iv;
+
+	if (ctx->auth_alg == QCE_HASH_SHA1_HMAC)
+		pstat->aead_sha1_3des_dec++;
+	else
+		pstat->aead_sha256_3des_dec++;
+	return _qcrypto_queue_req(cp, ctx->pengine, &req->base);
+}
+
+static int _sha_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	rctx->first_blk = 1;
+	rctx->last_blk = 0;
+	rctx->byte_count[0] = 0;
+	rctx->byte_count[1] = 0;
+	rctx->byte_count[2] = 0;
+	rctx->byte_count[3] = 0;
+	rctx->trailing_buf_len = 0;
+	rctx->count = 0;
+
+	return 0;
+};
+
+static int _sha1_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA1;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	pstat->sha1_digest++;
+	return 0;
+};
+
+static int _sha256_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+
+	_sha_init(req);
+	sha_ctx->alg = QCE_HASH_SHA256;
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	pstat->sha256_digest++;
+	return 0;
+};
+
+static int _sha1_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *out_ctx = (struct sha1_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA1_DIGEST_SIZE);
+	memcpy(out_ctx->buffer, rctx->trailing_buf, SHA1_BLOCK_SIZE);
+
+	return 0;
+};
+
+static int _sha1_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha1_export(req, out);
+}
+
+/* crypto hw padding constant for hmac first operation */
+#define HMAC_PADDING 64
+
+static int __sha1_import_common(struct ahash_request  *req, const void *in,
+				bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha1_state *in_ctx = (struct sha1_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE);
+	if (in_ctx->count <= SHA1_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For HMAC, the hardware pads the first block, so the
+		 * byte count is incremented by 64 after the first
+		 * operation completes.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA1_BLOCK_SIZE-1));
+	return 0;
+}
+
+static int _sha1_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, false);
+}
+
+static int _sha1_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha1_import_common(req, in, true);
+}
+
+static int _sha256_export(struct ahash_request  *req, void *out)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *out_ctx = (struct sha256_state *)out;
+
+	out_ctx->count = rctx->count;
+	_byte_stream_to_words(out_ctx->state, rctx->digest, SHA256_DIGEST_SIZE);
+	memcpy(out_ctx->buf, rctx->trailing_buf, SHA256_BLOCK_SIZE);
+
+	return 0;
+};
+
+static int _sha256_hmac_export(struct ahash_request  *req, void *out)
+{
+	return _sha256_export(req, out);
+}
+
+static int __sha256_import_common(struct ahash_request  *req, const void *in,
+			bool hmac)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct sha256_state *in_ctx = (struct sha256_state *)in;
+	u64 hw_count = in_ctx->count;
+
+	rctx->count = in_ctx->count;
+	memcpy(rctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE);
+
+	if (in_ctx->count <= SHA256_BLOCK_SIZE) {
+		rctx->first_blk = 1;
+	} else {
+		rctx->first_blk = 0;
+		/*
+		 * For HMAC, the hardware pads the first block, so the
+		 * byte count is incremented by 64 after the first
+		 * operation completes.
+		 */
+		if (hmac)
+			hw_count += HMAC_PADDING;
+	}
+
+	rctx->byte_count[0] =  (uint32_t)(hw_count & 0xFFFFFFC0);
+	rctx->byte_count[1] =  (uint32_t)(hw_count >> 32);
+	_words_to_byte_stream(in_ctx->state, rctx->digest, sha_ctx->diglen);
+
+	rctx->trailing_buf_len = (uint32_t)(in_ctx->count &
+						(SHA256_BLOCK_SIZE-1));
+
+	return 0;
+}
+
+static int _sha256_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, false);
+}
+
+static int _sha256_hmac_import(struct ahash_request  *req, const void *in)
+{
+	return __sha256_import_common(req, in, true);
+}
+
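+/*
+ * For engines that can only DMA from aligned, contiguous memory,
+ * flatten the request source into a single bounce buffer; the
+ * original scatterlist is saved in rctx->orig_src so the completion
+ * path can restore it.
+ */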
+static int _copy_source(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *srctx = NULL;
+	uint32_t bytes = 0;
+	uint32_t num_sg = 0;
+
+	srctx = ahash_request_ctx(req);
+	srctx->orig_src = req->src;
+	srctx->data = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+	if (srctx->data == NULL) {
+		pr_err("Mem alloc failed for srctx->data, size 0x%x\n",
+						(req->nbytes + 64));
+		return -ENOMEM;
+	}
+
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+	bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, srctx->data,
+						req->nbytes);
+	if (bytes != req->nbytes)
+		pr_warn("bytes copied=0x%x, bytes to copy=0x%x\n", bytes,
+							req->nbytes);
+	sg_set_buf(&srctx->dsg, srctx->data,
+				req->nbytes);
+	sg_mark_end(&srctx->dsg);
+	req->src = &srctx->dsg;
+
+	return 0;
+}
+
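+/*
+ * The engine hashes whole blocks only, so each update carries a
+ * sub-block tail in trailing_buf: the previous tail is prepended via
+ * a staging buffer chained ahead of req->src, and the new tail is
+ * carved off and held back for the next update or the final.
+ */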
+static int _sha_update(struct ahash_request  *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint32_t total, len, num_sg;
+	struct scatterlist *sg_last;
+	uint8_t *k_src = NULL;
+	uint32_t sha_pad_len = 0;
+	uint32_t trailing_buf_len = 0;
+	uint32_t nbytes;
+	uint32_t offset = 0;
+	uint32_t bytes = 0;
+	uint8_t  *staging;
+	int ret = 0;
+
+	/* check for trailing buffer from previous updates and append it */
+	total = req->nbytes + rctx->trailing_buf_len;
+	len = req->nbytes;
+
+	if (total <= sha_block_size) {
+		k_src = &rctx->trailing_buf[rctx->trailing_buf_len];
+		num_sg = qcrypto_count_sg(req->src, len);
+		bytes = qcrypto_sg_copy_to_buffer(req->src, num_sg, k_src, len);
+
+		rctx->trailing_buf_len = total;
+		return 0;
+	}
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	k_src = &rctx->trailing_buf[0];
+	/*  get new trailing buffer */
+	sha_pad_len = ALIGN(total, sha_block_size) - total;
+	trailing_buf_len =  sha_block_size - sha_pad_len;
+	offset = req->nbytes - trailing_buf_len;
+
+	if (offset != req->nbytes)
+		scatterwalk_map_and_copy(k_src, req->src, offset,
+						trailing_buf_len, 0);
+
+	nbytes = total - trailing_buf_len;
+	num_sg = qcrypto_count_sg(req->src, req->nbytes);
+
+	len = rctx->trailing_buf_len;
+	sg_last = req->src;
+
+	while (len < nbytes) {
+		if ((len + sg_last->length) > nbytes)
+			break;
+		len += sg_last->length;
+		sg_last = sg_next(sg_last);
+	}
+	if (rctx->trailing_buf_len) {
+		if (cp->ce_support.aligned_only)  {
+			rctx->data2 = kzalloc((req->nbytes + 64), GFP_ATOMIC);
+			if (rctx->data2 == NULL) {
+				pr_err("Mem alloc failed for rctx->data2, size 0x%x\n",
+						(req->nbytes + 64));
+				return -ENOMEM;
+			}
+			memcpy(rctx->data2, staging,
+						rctx->trailing_buf_len);
+			memcpy((rctx->data2 + rctx->trailing_buf_len),
+					rctx->data, req->src->length);
+			kzfree(rctx->data);
+			rctx->data = rctx->data2;
+			sg_set_buf(&rctx->sg[0], rctx->data,
+					(rctx->trailing_buf_len +
+							req->src->length));
+			req->src = rctx->sg;
+			sg_mark_end(&rctx->sg[0]);
+		} else {
+			sg_mark_end(sg_last);
+			memset(rctx->sg, 0, sizeof(rctx->sg));
+			sg_set_buf(&rctx->sg[0], staging,
+						rctx->trailing_buf_len);
+			sg_mark_end(&rctx->sg[1]);
+			sg_chain(rctx->sg, 2, req->src);
+			req->src = rctx->sg;
+		}
+	} else
+		sg_mark_end(sg_last);
+
+	req->nbytes = nbytes;
+	rctx->trailing_buf_len = trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+};
+
+static int _sha1_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_update(struct ahash_request  *req)
+{
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->count += req->nbytes;
+	return _sha_update(req, SHA256_BLOCK_SIZE);
+}
+
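+/*
+ * final only has to flush the residue: the whole blocks were already
+ * streamed to the engine by _sha_update(), so the staging copy of
+ * trailing_buf becomes the single scatterlist entry of the last
+ * request.
+ */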
+static int _sha_final(struct ahash_request *req, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int ret = 0;
+	uint8_t  *staging;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	rctx->last_blk = 1;
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	req->src = &rctx->sg[0];
+	req->nbytes = rctx->trailing_buf_len;
+
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+};
+
+static int _sha1_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_final(struct ahash_request  *req)
+{
+	return _sha_final(req, SHA256_BLOCK_SIZE);
+}
+
+static int _sha_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.aligned_only) {
+		if (_copy_source(req))
+			return -ENOMEM;
+	}
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+	rctx->first_blk = 1;
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+
+	return ret;
+}
+
+static int _sha1_digest(struct ahash_request *req)
+{
+	_sha1_init(req);
+	return _sha_digest(req);
+}
+
+static int _sha256_digest(struct ahash_request *req)
+{
+	_sha256_init(req);
+	return _sha_digest(req);
+}
+
+static void _crypto_sha_hmac_ahash_req_complete(
+	struct crypto_async_request *req, int err)
+{
+	struct completion *ahash_req_complete = req->data;
+
+	if (err == -EINPROGRESS)
+		return;
+	complete(ahash_req_complete);
+}
+
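+/*
+ * Per the HMAC definition, a key longer than the block size is first
+ * digested and the digest is used as the key. This helper runs that
+ * one-off digest through the engine and waits for it synchronously.
+ */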
+static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+		unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+	uint8_t	*in_buf;
+	int ret = 0;
+	struct scatterlist sg;
+	struct ahash_request *ahash_req;
+	struct completion ahash_req_complete;
+
+	ahash_req = ahash_request_alloc(tfm, GFP_KERNEL);
+	if (ahash_req == NULL)
+		return -ENOMEM;
+	init_completion(&ahash_req_complete);
+	ahash_request_set_callback(ahash_req,
+				CRYPTO_TFM_REQ_MAY_BACKLOG,
+				_crypto_sha_hmac_ahash_req_complete,
+				&ahash_req_complete);
+	crypto_ahash_clear_flags(tfm, ~0);
+
+	in_buf = kzalloc(len + 64, GFP_KERNEL);
+	if (in_buf == NULL) {
+		ahash_request_free(ahash_req);
+		return -ENOMEM;
+	}
+	memcpy(in_buf, key, len);
+	sg_set_buf(&sg, in_buf, len);
+	sg_mark_end(&sg);
+
+	ahash_request_set_crypt(ahash_req, &sg,
+				&sha_ctx->authkey[0], len);
+
+	if (sha_ctx->alg == QCE_HASH_SHA1)
+		ret = _sha1_digest(ahash_req);
+	else
+		ret = _sha256_digest(ahash_req);
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+					&ahash_req_complete);
+		/* reset the local completion that the callback signals */
+		reinit_completion(&ahash_req_complete);
+	}
+
+	kzfree(in_buf);
+	ahash_request_free(ahash_req);
+
+	return ret;
+}
+
+static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	memset(&sha_ctx->authkey[0], 0, SHA1_BLOCK_SIZE);
+	if (len <= SHA1_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+		sha_ctx->authkey_in_len = SHA1_BLOCK_SIZE;
+	}
+	return 0;
+}
+
+static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key,
+							unsigned int len)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base);
+
+	memset(&sha_ctx->authkey[0], 0, SHA256_BLOCK_SIZE);
+	if (len <= SHA256_BLOCK_SIZE) {
+		memcpy(&sha_ctx->authkey[0], key, len);
+		sha_ctx->authkey_in_len = len;
+	} else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+		_sha_hmac_setkey(tfm, key, len);
+		sha_ctx->authkey_in_len = SHA256_BLOCK_SIZE;
+	}
+
+	return 0;
+}
+
+static int _sha_hmac_init_ihash(struct ahash_request *req,
+						uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	int i;
+
+	for (i = 0; i < sha_block_size; i++)
+		rctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36;
+	rctx->trailing_buf_len = sha_block_size;
+
+	return 0;
+}
+
+static int _sha1_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memset(&rctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+						SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA1;
+		ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha256_hmac_init(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	struct crypto_stat *pstat;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+
+	memset(&rctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+
+	if (cp->ce_support.sha_hmac)
+		sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+	else {
+		sha_ctx->alg = QCE_HASH_SHA256;
+		ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_update(struct ahash_request *req)
+{
+	return _sha1_update(req);
+}
+
+static int _sha256_hmac_update(struct ahash_request *req)
+{
+	return _sha256_update(req);
+}
+
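+/*
+ * Software HMAC finalization for engines without native HMAC support:
+ * hash (K ^ opad) || inner_digest, with opad being 0x5c repeated over
+ * one block. The matching (K ^ ipad) prefix was injected into the
+ * data stream by _sha_hmac_init_ihash() at init time.
+ */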
+static int _sha_hmac_outer_hash(struct ahash_request *req,
+		uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int i;
+	uint8_t  *staging;
+	uint8_t *p;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	p = staging;
+	for (i = 0; i < sha_block_size; i++)
+		*p++ = sha_ctx->authkey[i] ^ 0x5c;
+	memcpy(p, &rctx->digest[0], sha_digest_size);
+	sg_set_buf(&rctx->sg[0], staging, sha_block_size +
+							sha_digest_size);
+	sg_mark_end(&rctx->sg[0]);
+
+	/* save the original req structure fields */
+	rctx->src = req->src;
+	rctx->nbytes = req->nbytes;
+
+	req->src = &rctx->sg[0];
+	req->nbytes = sha_block_size + sha_digest_size;
+
+	_sha_init(req);
+	if (sha_ctx->alg == QCE_HASH_SHA1) {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+		sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	} else {
+		memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+							SHA256_DIGEST_SIZE);
+		sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	}
+
+	rctx->last_blk = 1;
+	return  _qcrypto_queue_req(cp, sha_ctx->pengine, &req->base);
+}
+
+static int _sha_hmac_inner_hash(struct ahash_request *req,
+			uint32_t sha_digest_size, uint32_t sha_block_size)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct ahash_request *areq = sha_ctx->ahash_req;
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+	uint8_t  *staging;
+
+	staging = (uint8_t *)ALIGN(((uintptr_t)rctx->staging_dmabuf),
+							L1_CACHE_BYTES);
+	memcpy(staging, rctx->trailing_buf, rctx->trailing_buf_len);
+	sg_set_buf(&rctx->sg[0], staging, rctx->trailing_buf_len);
+	sg_mark_end(&rctx->sg[0]);
+
+	ahash_request_set_crypt(areq, &rctx->sg[0], &rctx->digest[0],
+						rctx->trailing_buf_len);
+	rctx->last_blk = 1;
+	ret =  _qcrypto_queue_req(cp, sha_ctx->pengine, &areq->base);
+
+	if (ret == -EINPROGRESS || ret == -EBUSY) {
+		ret = wait_for_completion_interruptible(
+					&sha_ctx->ahash_req_complete);
+		reinit_completion(&sha_ctx->ahash_req_complete);
+	}
+
+	return ret;
+}
+
+static int _sha1_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA1_BLOCK_SIZE);
+	ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+	if (ret)
+		return ret;
+	return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, SHA1_BLOCK_SIZE);
+}
+
+static int _sha256_hmac_final(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = sha_ctx->cp;
+	int ret = 0;
+
+	if (cp->ce_support.sha_hmac)
+		return _sha_final(req, SHA256_BLOCK_SIZE);
+
+	ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+	if (ret)
+		return ret;
+
+	return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, SHA256_BLOCK_SIZE);
+}
+
+static int _sha1_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha1_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha1_uint8[0],
+							SHA1_DIGEST_SIZE);
+	sha_ctx->diglen = SHA1_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA1_HMAC;
+
+	return _sha_digest(req);
+}
+
+static int _sha256_hmac_digest(struct ahash_request *req)
+{
+	struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_stat *pstat;
+	struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req);
+
+	pstat = &_qcrypto_stat;
+	pstat->sha256_hmac_digest++;
+
+	_sha_init(req);
+	memcpy(&rctx->digest[0], &_std_init_vector_sha256_uint8[0],
+						SHA256_DIGEST_SIZE);
+	sha_ctx->diglen = SHA256_DIGEST_SIZE;
+	sha_ctx->alg = QCE_HASH_SHA256_HMAC;
+
+	return _sha_digest(req);
+}
+
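+/*
+ * When the SoC prefers the software implementation of an algorithm,
+ * the hardware variant is still registered, but under a "qcom-"
+ * prefixed cra_name so it never claims the generic name.
+ */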
+static int _qcrypto_prefix_alg_cra_name(char cra_name[], unsigned int size)
+{
+	char new_cra_name[CRYPTO_MAX_ALG_NAME] = "qcom-";
+
+	if (size >= CRYPTO_MAX_ALG_NAME - strlen("qcom-"))
+		return -EINVAL;
+	strlcat(new_cra_name, cra_name, CRYPTO_MAX_ALG_NAME);
+	strlcpy(cra_name, new_cra_name, CRYPTO_MAX_ALG_NAME);
+	return 0;
+}
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_cipher_set_device);
+
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req, u32 dev,
+			u32 hw_inst)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device_hw(cp, dev, hw_inst);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+}
+EXPORT_SYMBOL(qcrypto_cipher_set_device_hw);
+
+int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_aead_set_device);
+
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+	struct crypto_engine *pengine = NULL;
+
+	pengine = _qrypto_find_pengine_device(cp, dev);
+	if (pengine == NULL)
+		return -ENODEV;
+	ctx->pengine = pengine;
+
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_ahash_set_device);
+
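+/*
+ * QCRYPTO_CTX_USE_HW_KEY is honoured only when the platform reports
+ * hw_key_support, and the key-source flags in QCRYPTO_CTX_KEY_MASK
+ * are mutually exclusive - they must never all be set on a context.
+ */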
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(cp->platform_support.hw_key_support == false)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_cipher_set_flag);
+
+int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(cp->platform_support.hw_key_support == false)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_aead_set_flag);
+
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+	struct crypto_priv *cp = ctx->cp;
+
+	if ((flags & QCRYPTO_CTX_USE_HW_KEY) &&
+		(cp->platform_support.hw_key_support == false)) {
+		pr_err("%s HW key usage not supported\n", __func__);
+		return -EINVAL;
+	}
+	if (((flags | ctx->flags) & QCRYPTO_CTX_KEY_MASK) ==
+						QCRYPTO_CTX_KEY_MASK) {
+		pr_err("%s Cannot set all key flags\n", __func__);
+		return -EINVAL;
+	}
+
+	ctx->flags |= flags;
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_ahash_set_flag);
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+							unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+
+};
+EXPORT_SYMBOL(qcrypto_cipher_clear_flag);
+
+int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags)
+{
+	struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+
+};
+EXPORT_SYMBOL(qcrypto_aead_clear_flag);
+
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags)
+{
+	struct qcrypto_sha_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
+
+	ctx->flags &= ~flags;
+	return 0;
+};
+EXPORT_SYMBOL(qcrypto_ahash_clear_flag);
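+
+/*
+ * Illustrative only - a client that wants its requests pinned to a
+ * particular CE instance could combine the helpers above roughly as
+ * follows (hypothetical caller code; "dev" is the desired device id):
+ *
+ *	if (qcrypto_cipher_set_device(req, dev))
+ *		return -ENODEV;
+ *	qcrypto_cipher_set_flag(req, QCRYPTO_CTX_USE_HW_KEY);
+ */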
+
+static struct ahash_alg _qcrypto_ahash_algos[] = {
+	{
+		.init		=	_sha1_init,
+		.update		=	_sha1_update,
+		.final		=	_sha1_final,
+		.export		=	_sha1_export,
+		.import		=	_sha1_import,
+		.digest		=	_sha1_digest,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "sha1",
+				.cra_driver_name = "qcrypto-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_init,
+		.update		=	_sha256_update,
+		.final		=	_sha256_final,
+		.export		=	_sha256_export,
+		.import		=	_sha256_import,
+		.digest		=	_sha256_digest,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "sha256",
+				.cra_driver_name = "qcrypto-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct ahash_alg _qcrypto_sha_hmac_algos[] = {
+	{
+		.init		=	_sha1_hmac_init,
+		.update		=	_sha1_hmac_update,
+		.final		=	_sha1_hmac_final,
+		.export		=	_sha1_hmac_export,
+		.import		=	_sha1_hmac_import,
+		.digest		=	_sha1_hmac_digest,
+		.setkey		=	_sha1_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA1_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha1_state),
+			.base	= {
+				.cra_name	 = "hmac(sha1)",
+				.cra_driver_name = "qcrypto-hmac-sha1",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							 CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA1_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+	{
+		.init		=	_sha256_hmac_init,
+		.update		=	_sha256_hmac_update,
+		.final		=	_sha256_hmac_final,
+		.export		=	_sha256_hmac_export,
+		.import		=	_sha256_hmac_import,
+		.digest		=	_sha256_hmac_digest,
+		.setkey		=	_sha256_hmac_setkey,
+		.halg		= {
+			.digestsize	= SHA256_DIGEST_SIZE,
+			.statesize	= sizeof(struct sha256_state),
+			.base		= {
+				.cra_name	 = "hmac(sha256)",
+				.cra_driver_name = "qcrypto-hmac-sha256",
+				.cra_priority	 = 300,
+				.cra_flags	 = CRYPTO_ALG_TYPE_AHASH |
+							CRYPTO_ALG_ASYNC,
+				.cra_blocksize	 = SHA256_BLOCK_SIZE,
+				.cra_ctxsize	 =
+						sizeof(struct qcrypto_sha_ctx),
+				.cra_alignmask	 = 0,
+				.cra_type	 = &crypto_ahash_type,
+				.cra_module	 = THIS_MODULE,
+				.cra_init	 = _qcrypto_ahash_hmac_cra_init,
+				.cra_exit	 = _qcrypto_ahash_cra_exit,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_algos[] = {
+	{
+		.cra_name		= "ecb(aes)",
+		.cra_driver_name	= "qcrypto-ecb-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ecb,
+				.decrypt	= _qcrypto_dec_aes_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(aes)",
+		.cra_driver_name = "qcrypto-cbc-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_cbc,
+				.decrypt	= _qcrypto_dec_aes_cbc,
+			},
+		},
+	},
+	{
+		.cra_name	= "ctr(aes)",
+		.cra_driver_name = "qcrypto-ctr-aes",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER |
+					CRYPTO_ALG_NEED_FALLBACK |
+					CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aes_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_aes_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= AES_BLOCK_SIZE,
+				.min_keysize	= AES_MIN_KEY_SIZE,
+				.max_keysize	= AES_MAX_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_aes,
+				.encrypt	= _qcrypto_enc_aes_ctr,
+				.decrypt	= _qcrypto_dec_aes_ctr,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des)",
+		.cra_driver_name	= "qcrypto-ecb-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_ecb,
+				.decrypt	= _qcrypto_dec_des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des)",
+		.cra_driver_name = "qcrypto-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES_BLOCK_SIZE,
+				.min_keysize	= DES_KEY_SIZE,
+				.max_keysize	= DES_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_des,
+				.encrypt	= _qcrypto_enc_des_cbc,
+				.decrypt	= _qcrypto_dec_des_cbc,
+			},
+		},
+	},
+	{
+		.cra_name		= "ecb(des3_ede)",
+		.cra_driver_name	= "qcrypto-ecb-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_ecb,
+				.decrypt	= _qcrypto_dec_3des_ecb,
+			},
+		},
+	},
+	{
+		.cra_name	= "cbc(des3_ede)",
+		.cra_driver_name = "qcrypto-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_ablkcipher_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_ablkcipher_init,
+		.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+		.cra_u		= {
+			.ablkcipher = {
+				.ivsize		= DES3_EDE_BLOCK_SIZE,
+				.min_keysize	= DES3_EDE_KEY_SIZE,
+				.max_keysize	= DES3_EDE_KEY_SIZE,
+				.setkey		= _qcrypto_setkey_3des,
+				.encrypt	= _qcrypto_enc_3des_cbc,
+				.decrypt	= _qcrypto_dec_3des_cbc,
+			},
+		},
+	},
+};
+
+static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = {
+	.cra_name	= "xts(aes)",
+	.cra_driver_name = "qcrypto-xts-aes",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_ablkcipher_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_ablkcipher_init,
+	.cra_exit	= _qcrypto_cra_ablkcipher_exit,
+	.cra_u		= {
+		.ablkcipher = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.min_keysize	= AES_MIN_KEY_SIZE,
+			.max_keysize	= AES_MAX_KEY_SIZE,
+			.setkey		= _qcrypto_setkey_aes_xts,
+			.encrypt	= _qcrypto_enc_aes_xts,
+			.decrypt	= _qcrypto_dec_aes_xts,
+		},
+	},
+};
+
+static struct aead_alg _qcrypto_aead_sha1_hmac_algos[] = {
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha1),cbc(aes))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = AES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_aes_cbc,
+		.init		= _qcrypto_cra_aead_aes_sha1_init,
+		.exit		= _qcrypto_cra_aead_aes_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha1),cbc(des))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_des_cbc,
+		.init		= _qcrypto_cra_aead_sha1_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha1),cbc(des3_ede))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA1_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_3des_cbc,
+		.init		= _qcrypto_cra_aead_sha1_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_sha256_hmac_algos[] = {
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha256),cbc(aes))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-aes",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = AES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= AES_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_aes_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_aes_cbc,
+		.init		= _qcrypto_cra_aead_aes_sha256_init,
+		.exit		= _qcrypto_cra_aead_aes_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha256),cbc(des))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_des_cbc,
+		.init		= _qcrypto_cra_aead_sha256_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+	{
+		.base = {
+			.cra_name	 = "authenc(hmac(sha256),cbc(des3_ede))",
+			.cra_driver_name = "qcrypto-aead-hmac-sha256-cbc-3des",
+			.cra_priority	 = 300,
+			.cra_flags	 = CRYPTO_ALG_ASYNC,
+			.cra_blocksize	 = DES3_EDE_BLOCK_SIZE,
+			.cra_ctxsize	 = sizeof(struct qcrypto_cipher_ctx),
+			.cra_alignmask	 = 0,
+			.cra_module	 = THIS_MODULE,
+		},
+		.ivsize		= DES3_EDE_BLOCK_SIZE,
+		.maxauthsize	= SHA256_DIGEST_SIZE,
+		.setkey		= _qcrypto_aead_setkey,
+		.setauthsize	= _qcrypto_aead_setauthsize,
+		.encrypt	= _qcrypto_aead_encrypt_3des_cbc,
+		.decrypt	= _qcrypto_aead_decrypt_3des_cbc,
+		.init		= _qcrypto_cra_aead_sha256_init,
+		.exit		= _qcrypto_cra_aead_exit,
+	},
+};
+
+static struct aead_alg _qcrypto_aead_ccm_algo = {
+	.base = {
+		.cra_name	= "ccm(aes)",
+		.cra_driver_name = "qcrypto-aes-ccm",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = AES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_module	= THIS_MODULE,
+	},
+	.ivsize         = AES_BLOCK_SIZE,
+	.maxauthsize    = AES_BLOCK_SIZE,
+	.setkey = _qcrypto_aead_ccm_setkey,
+	.setauthsize = _qcrypto_aead_ccm_setauthsize,
+	.encrypt = _qcrypto_aead_encrypt_aes_ccm,
+	.decrypt = _qcrypto_aead_decrypt_aes_ccm,
+	.init	= _qcrypto_cra_aead_ccm_init,
+	.exit	= _qcrypto_cra_aead_exit,
+};
+
+static struct aead_alg _qcrypto_aead_rfc4309_ccm_algo = {
+	.base = {
+		.cra_name	= "rfc4309(ccm(aes))",
+		.cra_driver_name = "qcrypto-rfc4309-aes-ccm",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_ASYNC,
+		.cra_blocksize  = 1,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_module	= THIS_MODULE,
+	},
+	.ivsize         = 8,
+	.maxauthsize    = 16,
+	.setkey = _qcrypto_aead_rfc4309_ccm_setkey,
+	.setauthsize = _qcrypto_aead_rfc4309_ccm_setauthsize,
+	.encrypt = _qcrypto_aead_rfc4309_enc_aes_ccm,
+	.decrypt = _qcrypto_aead_rfc4309_dec_aes_ccm,
+	.init	= _qcrypto_cra_aead_rfc4309_ccm_init,
+	.exit	= _qcrypto_cra_aead_exit,
+};
+
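+/*
+ * Probe one crypto engine (CE) instance. Every probed device gets its
+ * own crypto_engine on cp->engine_list, but the crypto API algorithms
+ * are registered only once, by the first unit; later units simply
+ * join the list and share the registered algorithms.
+ */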
+static int  _qcrypto_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	void *handle;
+	struct crypto_priv *cp = &qcrypto_dev;
+	int i;
+	struct msm_ce_hw_support *platform_support;
+	struct crypto_engine *pengine;
+	unsigned long flags;
+	struct qcrypto_req_control *pqcrypto_req_control = NULL;
+
+	pengine = kzalloc(sizeof(*pengine), GFP_KERNEL);
+	if (!pengine)
+		return -ENOMEM;
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		kzfree(pengine);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	platform_set_drvdata(pdev, pengine);
+	pengine->qce = handle;
+	pengine->pcp = cp;
+	pengine->pdev = pdev;
+	pengine->signature = 0xdeadbeef;
+
+	init_timer(&(pengine->bw_reaper_timer));
+	INIT_WORK(&pengine->bw_reaper_ws, qcrypto_bw_reaper_work);
+	pengine->bw_reaper_timer.function =
+			qcrypto_bw_reaper_timer_callback;
+	INIT_WORK(&pengine->bw_allocate_ws, qcrypto_bw_allocate_work);
+	pengine->high_bw_req = false;
+	pengine->active_seq = 0;
+	pengine->last_active_seq = 0;
+	pengine->check_flag = false;
+	pengine->max_req_used = 0;
+	pengine->issue_req = false;
+
+	crypto_init_queue(&pengine->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+
+	mutex_lock(&cp->engine_lock);
+	cp->total_units++;
+	pengine->unit = cp->total_units;
+
+	spin_lock_irqsave(&cp->lock, flags);
+	pengine->first_engine = list_empty(&cp->engine_list);
+	if (pengine->first_engine)
+		cp->first_engine = pengine;
+	list_add_tail(&pengine->elist, &cp->engine_list);
+	cp->next_engine = pengine;
+	spin_unlock_irqrestore(&cp->lock, flags);
+
+	qce_hw_support(pengine->qce, &cp->ce_support);
+	pengine->ce_hw_instance = cp->ce_support.ce_hw_instance;
+	pengine->max_req = cp->ce_support.max_request;
+	pqcrypto_req_control = kzalloc(sizeof(struct qcrypto_req_control) *
+			pengine->max_req, GFP_KERNEL);
+	if (pqcrypto_req_control == NULL) {
+		rc = -ENOMEM;
+		goto err;
+	}
+	qcrypto_init_req_control(pengine, pqcrypto_req_control);
+	if (cp->ce_support.bam)	 {
+		cp->platform_support.ce_shared = cp->ce_support.is_shared;
+		cp->platform_support.shared_ce_resource = 0;
+		cp->platform_support.hw_key_support = cp->ce_support.hw_key;
+		cp->platform_support.sha_hmac = 1;
+
+		cp->platform_support.bus_scale_table =
+			(struct msm_bus_scale_pdata *)
+					msm_bus_cl_get_pdata(pdev);
+		if (!cp->platform_support.bus_scale_table)
+			pr_warn("bus_scale_table is NULL\n");
+
+		pengine->ce_device = cp->ce_support.ce_device;
+
+	} else {
+		platform_support =
+			(struct msm_ce_hw_support *)pdev->dev.platform_data;
+		cp->platform_support.ce_shared = platform_support->ce_shared;
+		cp->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+		cp->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+		cp->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+		cp->platform_support.sha_hmac = platform_support->sha_hmac;
+	}
+
+	pengine->bus_scale_handle = 0;
+
+	if (cp->platform_support.bus_scale_table != NULL) {
+		pengine->bus_scale_handle =
+			msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+					cp->platform_support.bus_scale_table);
+		if (!pengine->bus_scale_handle) {
+			pr_err("%s not able to get bus scale\n",
+				__func__);
+			rc = -ENOMEM;
+			goto err;
+		}
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+	} else {
+		pengine->bw_state = BUS_HAS_BANDWIDTH;
+	}
+
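+	/*
+	 * Only the first probed engine registers the crypto algorithms
+	 * below; any additional engine simply joins cp->engine_list and
+	 * serves the transforms that are already registered.
+	 */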
+	if (cp->total_units != 1) {
+		mutex_unlock(&cp->engine_lock);
+		return 0;
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_algos[i]);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_cbc_ecb_ctr_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.cra_name,
+					strlen(q_alg->cipher_alg.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/* register the aes-xts cipher algorithm if the device supports it */
+	if (cp->ce_support.aes_xts) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_xts_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_xts_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->cipher_alg.cra_name,
+					strlen(q_alg->cipher_alg.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->cipher_alg.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/*
+	 * Register crypto hash (sha1 and sha256) algorithms the
+	 * device supports
+	 */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+		struct qcrypto_alg *q_alg = NULL;
+
+		q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_ahash_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+				q_alg->sha_alg.halg.base.cra_name,
+				strlen(q_alg->sha_alg.halg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+					"The algorithm name %s is too long.\n",
+					q_alg->sha_alg.halg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_ahash(&q_alg->sha_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto aead (hmac-sha1) algorithms the device supports */
+	if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac
+		|| cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha1_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	/* register crypto aead (hmac-sha256) algorithms the device supports */
+	if (cp->ce_support.sha_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha256_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_sha256_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_aead_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_aead(&q_alg->aead_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			}
+		}
+	}
+
+	if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+		/* register crypto hmac algorithms the device supports */
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+			struct qcrypto_alg *q_alg = NULL;
+
+			q_alg = _qcrypto_sha_alg_alloc(cp,
+						&_qcrypto_sha_hmac_algos[i]);
+
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+			if (cp->ce_support.use_sw_hmac_algo) {
+				rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->sha_alg.halg.base.cra_name,
+					strlen(
+					q_alg->sha_alg.halg.base.cra_name));
+				if (rc) {
+					dev_err(&pdev->dev,
+					     "The algorithm name %s is too long.\n",
+					     q_alg->sha_alg.halg.base.cra_name);
+					kfree(q_alg);
+					goto err;
+				}
+			}
+			rc = crypto_register_ahash(&q_alg->sha_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+				"%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+				kzfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			}
+		}
+	}
+	/*
+	 * Register crypto aead (aes-ccm) algorithms the
+	 * device supports
+	 */
+	if (cp->ce_support.aes_ccm) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_aead_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kzfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+
+		q_alg = _qcrypto_aead_alg_alloc(cp,
+					&_qcrypto_aead_rfc4309_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		if (cp->ce_support.use_sw_aes_ccm_algo) {
+			rc = _qcrypto_prefix_alg_cra_name(
+					q_alg->aead_alg.base.cra_name,
+					strlen(q_alg->aead_alg.base.cra_name));
+			if (rc) {
+				dev_err(&pdev->dev,
+						"The algorithm name %s is too long.\n",
+						q_alg->aead_alg.base.cra_name);
+				kfree(q_alg);
+				goto err;
+			}
+		}
+		rc = crypto_register_aead(&q_alg->aead_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->aead_alg.base.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->aead_alg.base.cra_driver_name);
+		}
+	}
+	mutex_unlock(&cp->engine_lock);
+
+	return 0;
+err:
+	_qcrypto_remove_engine(pengine);
+	mutex_unlock(&cp->engine_lock);
+	if (pengine->qce)
+		qce_close(pengine->qce);
+	kzfree(pengine);
+	return rc;
+}
+
+static int _qcrypto_engine_in_use(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp = pengine->pcp;
+
+	if ((atomic_read(&pengine->req_count) > 0) || pengine->req_queue.qlen
+					|| cp->req_queue.qlen)
+		return 1;
+	return 0;
+}
+
+static void _qcrypto_do_suspending(struct crypto_engine *pengine)
+{
+	struct crypto_priv *cp = pengine->pcp;
+
+	if (cp->platform_support.bus_scale_table == NULL)
+		return;
+	del_timer_sync(&pengine->bw_reaper_timer);
+	qcrypto_ce_set_bus(pengine, false);
+}
+
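+/*
+ * Suspend is refused with -EBUSY while the engine still holds or is
+ * changing bus bandwidth, or while requests are queued or in flight;
+ * only BUS_NO_BANDWIDTH (with no pending high-bandwidth request) and an
+ * idle BUS_HAS_BANDWIDTH state may transition to BUS_SUSPENDED.
+ */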
+static int _qcrypto_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+
+	pengine = platform_get_drvdata(pdev);
+	if (!pengine)
+		return -EINVAL;
+
+	/*
+	 * Check if this platform supports clock management in suspend/resume.
+	 * If not, simply return 0.
+	 */
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	switch (pengine->bw_state) {
+	case BUS_NO_BANDWIDTH:
+		if (pengine->high_bw_req == false)
+			pengine->bw_state = BUS_SUSPENDED;
+		else
+			ret = -EBUSY;
+		break;
+	case BUS_HAS_BANDWIDTH:
+		if (_qcrypto_engine_in_use(pengine)) {
+			ret = -EBUSY;
+		} else {
+			pengine->bw_state = BUS_SUSPENDING;
+			spin_unlock_irqrestore(&cp->lock, flags);
+			_qcrypto_do_suspending(pengine);
+			spin_lock_irqsave(&cp->lock, flags);
+			pengine->bw_state = BUS_SUSPENDED;
+		}
+		break;
+	case BUS_BANDWIDTH_RELEASING:
+	case BUS_BANDWIDTH_ALLOCATING:
+	case BUS_SUSPENDED:
+	case BUS_SUSPENDING:
+	default:
+		ret = -EBUSY;
+		break;
+	}
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	if (ret)
+		return ret;
+	if (qce_pm_table.suspend)
+		qce_pm_table.suspend(pengine->qce);
+	return 0;
+}
+
+static int _qcrypto_resume(struct platform_device *pdev)
+{
+	struct crypto_engine *pengine;
+	struct crypto_priv *cp;
+	unsigned long flags;
+	int ret = 0;
+
+	pengine = platform_get_drvdata(pdev);
+
+	if (!pengine)
+		return -EINVAL;
+	cp = pengine->pcp;
+	if (!cp->ce_support.clk_mgmt_sus_res)
+		return 0;
+	spin_lock_irqsave(&cp->lock, flags);
+	if (pengine->bw_state == BUS_SUSPENDED) {
+		spin_unlock_irqrestore(&cp->lock, flags);
+		if (qce_pm_table.resume)
+			qce_pm_table.resume(pengine->qce);
+
+		spin_lock_irqsave(&cp->lock, flags);
+		pengine->bw_state = BUS_NO_BANDWIDTH;
+		pengine->active_seq++;
+		pengine->check_flag = false;
+		if (cp->req_queue.qlen || pengine->req_queue.qlen) {
+			if (pengine->high_bw_req == false) {
+				qcrypto_ce_bw_allocate_req(pengine);
+				pengine->high_bw_req = true;
+			}
+		}
+	} else
+		ret = -EBUSY;
+
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return ret;
+}
+
+static const struct of_device_id qcrypto_match[] = {
+	{	.compatible = "qcom,qcrypto",
+	},
+	{}
+};
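+
+/*
+ * A device tree node binds on the compatible string alone. The node below
+ * is illustrative only (the label and unit address are made up here; real
+ * nodes also carry the reg/clock/BAM resources consumed by the underlying
+ * qce layer):
+ *
+ *	qcom_crypto: qcrypto@0 {
+ *		compatible = "qcom,qcrypto";
+ *	};
+ */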
+
+static struct platform_driver __qcrypto = {
+	.probe          = _qcrypto_probe,
+	.remove         = _qcrypto_remove,
+	.suspend        = _qcrypto_suspend,
+	.resume         = _qcrypto_resume,
+	.driver         = {
+		.owner  = THIS_MODULE,
+		.name   = "qcrypto",
+		.of_match_table = qcrypto_match,
+	},
+};
+
+static int _debug_qcrypto;
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcrypto = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcrypto);
+
+	if (len <= count)
+		rc = simple_read_from_buffer((void __user *) buf, len,
+			ppos, (void *) _debug_read_buf, len);
+	return rc;
+}
+
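+/*
+ * Writing anything to the stats node resets the per-engine and global
+ * counters; the written payload itself is ignored.
+ */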
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	unsigned long flags;
+	struct crypto_priv *cp = &qcrypto_dev;
+	struct crypto_engine *pe;
+	int i;
+
+	memset((char *)&_qcrypto_stat, 0, sizeof(struct crypto_stat));
+	spin_lock_irqsave(&cp->lock, flags);
+	list_for_each_entry(pe, &cp->engine_list, elist) {
+		pe->total_req = 0;
+		pe->err_req = 0;
+		qce_clear_driver_stats(pe->qce);
+		pe->max_req_used = 0;
+	}
+	cp->max_qlen = 0;
+	cp->resp_start = 0;
+	cp->resp_stop = 0;
+	cp->no_avail = 0;
+	cp->max_resp_qlen = 0;
+	cp->queue_work_eng3 = 0;
+	cp->queue_work_not_eng3 = 0;
+	cp->queue_work_not_eng3_nz = 0;
+	cp->max_reorder_cnt = 0;
+	for (i = 0; i < MAX_SMP_CPU + 1; i++)
+		cp->cpu_req[i] = 0;
+	spin_unlock_irqrestore(&cp->lock, flags);
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =         _debug_stats_open,
+	.read =         _debug_stats_read,
+	.write =        _debug_stats_write,
+};
+
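+/*
+ * Creates the "qcrypto/stats-1" debugfs node (i.e. typically
+ * /sys/kernel/debug/qcrypto/stats-1, assuming the usual debugfs mount
+ * point): reads render the counters via _disp_stats(), writes clear them.
+ */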
+static int _qcrypto_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcrypto", NULL);
+	if (IS_ERR_OR_NULL(_debug_dent)) {
+		pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+				_debug_dent ? PTR_ERR(_debug_dent) : -ENOMEM);
+		return _debug_dent ? PTR_ERR(_debug_dent) : -ENOMEM;
+	}
+
+	snprintf(name, DEBUG_MAX_FNAME - 1, "stats-%d", 1);
+	_debug_qcrypto = 0;
+	dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcrypto, &_debug_stats_ops);
+	if (IS_ERR_OR_NULL(dent)) {
+		rc = dent ? PTR_ERR(dent) : -ENOMEM;
+		pr_err("qcrypto debugfs_create_file fail, error %d\n", rc);
+		goto err;
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+	int rc;
+	struct crypto_priv *pcp = &qcrypto_dev;
+
+	rc = _qcrypto_debug_init();
+	if (rc)
+		return rc;
+	INIT_LIST_HEAD(&pcp->alg_list);
+	INIT_LIST_HEAD(&pcp->engine_list);
+	init_llist_head(&pcp->ordered_resp_list);
+	spin_lock_init(&pcp->lock);
+	mutex_init(&pcp->engine_lock);
+	pcp->resp_wq = alloc_workqueue("qcrypto_seq_response_wq",
+			WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
+	if (!pcp->resp_wq) {
+		pr_err("Error allocating workqueue\n");
+		return -ENOMEM;
+	}
+	INIT_WORK(&pcp->resp_work, seq_response);
+	pcp->total_units = 0;
+	pcp->platform_support.bus_scale_table = NULL;
+	pcp->next_engine = NULL;
+	pcp->scheduled_eng = NULL;
+	pcp->ce_req_proc_sts = IN_PROGRESS;
+	crypto_init_queue(&pcp->req_queue, MSM_QCRYPTO_REQ_QUEUE_LENGTH);
+	return platform_driver_register(&__qcrypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+	pr_debug("%s Unregister QCRYPTO\n", __func__);
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&__qcrypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Crypto driver");
diff --git a/drivers/crypto/msm/qcryptohw_50.h b/drivers/crypto/msm/qcryptohw_50.h
new file mode 100644
index 0000000..574f579
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_50.h
@@ -0,0 +1,528 @@
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_
+
+
+#define CRYPTO_BAM_CNFG_BITS_REG		0x0007C
+#define CRYPTO_BAM_CD_ENABLE			27
+#define CRYPTO_BAM_CD_ENABLE_MASK		(1 << CRYPTO_BAM_CD_ENABLE)
+
+#define QCE_AUTH_REG_BYTE_COUNT 4
+#define CRYPTO_VERSION_REG			0x1A000
+
+#define CRYPTO_DATA_IN0_REG			0x1A010
+#define CRYPTO_DATA_IN1_REG			0x1A014
+#define CRYPTO_DATA_IN2_REG			0x1A018
+#define CRYPTO_DATA_IN3_REG			0x1A01C
+
+#define CRYPTO_DATA_OUT0_REG			0x1A020
+#define CRYPTO_DATA_OUT1_REG			0x1A024
+#define CRYPTO_DATA_OUT2_REG			0x1A028
+#define CRYPTO_DATA_OUT3_REG			0x1A02C
+
+#define CRYPTO_STATUS_REG			0x1A100
+#define CRYPTO_STATUS2_REG			0x1A104
+#define CRYPTO_ENGINES_AVAIL			0x1A108
+#define CRYPTO_FIFO_SIZES_REG			0x1A10C
+
+#define CRYPTO_SEG_SIZE_REG			0x1A110
+#define CRYPTO_GOPROC_REG			0x1A120
+#define CRYPTO_GOPROC_QC_KEY_REG		0x1B000
+#define CRYPTO_GOPROC_OEM_KEY_REG		0x1C000
+
+#define CRYPTO_ENCR_SEG_CFG_REG			0x1A200
+#define CRYPTO_ENCR_SEG_SIZE_REG		0x1A204
+#define CRYPTO_ENCR_SEG_START_REG		0x1A208
+
+#define CRYPTO_ENCR_KEY0_REG			0x1D000
+#define CRYPTO_ENCR_KEY1_REG			0x1D004
+#define CRYPTO_ENCR_KEY2_REG			0x1D008
+#define CRYPTO_ENCR_KEY3_REG			0x1D00C
+#define CRYPTO_ENCR_KEY4_REG			0x1D010
+#define CRYPTO_ENCR_KEY5_REG			0x1D014
+#define CRYPTO_ENCR_KEY6_REG			0x1D018
+#define CRYPTO_ENCR_KEY7_REG			0x1D01C
+
+#define CRYPTO_ENCR_XTS_KEY0_REG		0x1D020
+#define CRYPTO_ENCR_XTS_KEY1_REG		0x1D024
+#define CRYPTO_ENCR_XTS_KEY2_REG		0x1D028
+#define CRYPTO_ENCR_XTS_KEY3_REG		0x1D02C
+#define CRYPTO_ENCR_XTS_KEY4_REG		0x1D030
+#define CRYPTO_ENCR_XTS_KEY5_REG		0x1D034
+#define CRYPTO_ENCR_XTS_KEY6_REG		0x1D038
+#define CRYPTO_ENCR_XTS_KEY7_REG		0x1D03C
+
+#define CRYPTO_ENCR_PIPE0_KEY0_REG		0x1E000
+#define CRYPTO_ENCR_PIPE0_KEY1_REG		0x1E004
+#define CRYPTO_ENCR_PIPE0_KEY2_REG		0x1E008
+#define CRYPTO_ENCR_PIPE0_KEY3_REG		0x1E00C
+#define CRYPTO_ENCR_PIPE0_KEY4_REG		0x1E010
+#define CRYPTO_ENCR_PIPE0_KEY5_REG		0x1E014
+#define CRYPTO_ENCR_PIPE0_KEY6_REG		0x1E018
+#define CRYPTO_ENCR_PIPE0_KEY7_REG		0x1E01C
+
+#define CRYPTO_ENCR_PIPE1_KEY0_REG		0x1E020
+#define CRYPTO_ENCR_PIPE1_KEY1_REG		0x1E024
+#define CRYPTO_ENCR_PIPE1_KEY2_REG		0x1E028
+#define CRYPTO_ENCR_PIPE1_KEY3_REG		0x1E02C
+#define CRYPTO_ENCR_PIPE1_KEY4_REG		0x1E030
+#define CRYPTO_ENCR_PIPE1_KEY5_REG		0x1E034
+#define CRYPTO_ENCR_PIPE1_KEY6_REG		0x1E038
+#define CRYPTO_ENCR_PIPE1_KEY7_REG		0x1E03C
+
+#define CRYPTO_ENCR_PIPE2_KEY0_REG		0x1E040
+#define CRYPTO_ENCR_PIPE2_KEY1_REG		0x1E044
+#define CRYPTO_ENCR_PIPE2_KEY2_REG		0x1E048
+#define CRYPTO_ENCR_PIPE2_KEY3_REG		0x1E04C
+#define CRYPTO_ENCR_PIPE2_KEY4_REG		0x1E050
+#define CRYPTO_ENCR_PIPE2_KEY5_REG		0x1E054
+#define CRYPTO_ENCR_PIPE2_KEY6_REG		0x1E058
+#define CRYPTO_ENCR_PIPE2_KEY7_REG		0x1E05C
+
+#define CRYPTO_ENCR_PIPE3_KEY0_REG		0x1E060
+#define CRYPTO_ENCR_PIPE3_KEY1_REG		0x1E064
+#define CRYPTO_ENCR_PIPE3_KEY2_REG		0x1E068
+#define CRYPTO_ENCR_PIPE3_KEY3_REG		0x1E06C
+#define CRYPTO_ENCR_PIPE3_KEY4_REG		0x1E070
+#define CRYPTO_ENCR_PIPE3_KEY5_REG		0x1E074
+#define CRYPTO_ENCR_PIPE3_KEY6_REG		0x1E078
+#define CRYPTO_ENCR_PIPE3_KEY7_REG		0x1E07C
+
+
+#define CRYPTO_ENCR_PIPE0_XTS_KEY0_REG		0x1E200
+#define CRYPTO_ENCR_PIPE0_XTS_KEY1_REG		0x1E204
+#define CRYPTO_ENCR_PIPE0_XTS_KEY2_REG		0x1E208
+#define CRYPTO_ENCR_PIPE0_XTS_KEY3_REG		0x1E20C
+#define CRYPTO_ENCR_PIPE0_XTS_KEY4_REG		0x1E210
+#define CRYPTO_ENCR_PIPE0_XTS_KEY5_REG		0x1E214
+#define CRYPTO_ENCR_PIPE0_XTS_KEY6_REG		0x1E218
+#define CRYPTO_ENCR_PIPE0_XTS_KEY7_REG		0x1E21C
+
+#define CRYPTO_ENCR_PIPE1_XTS_KEY0_REG		0x1E220
+#define CRYPTO_ENCR_PIPE1_XTS_KEY1_REG		0x1E224
+#define CRYPTO_ENCR_PIPE1_XTS_KEY2_REG		0x1E228
+#define CRYPTO_ENCR_PIPE1_XTS_KEY3_REG		0x1E22C
+#define CRYPTO_ENCR_PIPE1_XTS_KEY4_REG		0x1E230
+#define CRYPTO_ENCR_PIPE1_XTS_KEY5_REG		0x1E234
+#define CRYPTO_ENCR_PIPE1_XTS_KEY6_REG		0x1E238
+#define CRYPTO_ENCR_PIPE1_XTS_KEY7_REG		0x1E23C
+
+#define CRYPTO_ENCR_PIPE2_XTS_KEY0_REG		0x1E240
+#define CRYPTO_ENCR_PIPE2_XTS_KEY1_REG		0x1E244
+#define CRYPTO_ENCR_PIPE2_XTS_KEY2_REG		0x1E248
+#define CRYPTO_ENCR_PIPE2_XTS_KEY3_REG		0x1E24C
+#define CRYPTO_ENCR_PIPE2_XTS_KEY4_REG		0x1E250
+#define CRYPTO_ENCR_PIPE2_XTS_KEY5_REG		0x1E254
+#define CRYPTO_ENCR_PIPE2_XTS_KEY6_REG		0x1E258
+#define CRYPTO_ENCR_PIPE2_XTS_KEY7_REG		0x1E25C
+
+#define CRYPTO_ENCR_PIPE3_XTS_KEY0_REG		0x1E260
+#define CRYPTO_ENCR_PIPE3_XTS_KEY1_REG		0x1E264
+#define CRYPTO_ENCR_PIPE3_XTS_KEY2_REG		0x1E268
+#define CRYPTO_ENCR_PIPE3_XTS_KEY3_REG		0x1E26C
+#define CRYPTO_ENCR_PIPE3_XTS_KEY4_REG		0x1E270
+#define CRYPTO_ENCR_PIPE3_XTS_KEY5_REG		0x1E274
+#define CRYPTO_ENCR_PIPE3_XTS_KEY6_REG		0x1E278
+#define CRYPTO_ENCR_PIPE3_XTS_KEY7_REG		0x1E27C
+
+
+#define CRYPTO_CNTR0_IV0_REG			0x1A20C
+#define CRYPTO_CNTR1_IV1_REG			0x1A210
+#define CRYPTO_CNTR2_IV2_REG			0x1A214
+#define CRYPTO_CNTR3_IV3_REG			0x1A218
+
+#define CRYPTO_CNTR_MASK_REG0			0x1A23C
+#define CRYPTO_CNTR_MASK_REG1			0x1A238
+#define CRYPTO_CNTR_MASK_REG2			0x1A234
+#define CRYPTO_CNTR_MASK_REG			0x1A21C
+
+#define CRYPTO_ENCR_CCM_INT_CNTR0_REG		0x1A220
+#define CRYPTO_ENCR_CCM_INT_CNTR1_REG		0x1A224
+#define CRYPTO_ENCR_CCM_INT_CNTR2_REG		0x1A228
+#define CRYPTO_ENCR_CCM_INT_CNTR3_REG		0x1A22C
+
+#define CRYPTO_ENCR_XTS_DU_SIZE_REG		0x1A230
+
+#define CRYPTO_AUTH_SEG_CFG_REG			0x1A300
+#define CRYPTO_AUTH_SEG_SIZE_REG		0x1A304
+#define CRYPTO_AUTH_SEG_START_REG		0x1A308
+
+#define CRYPTO_AUTH_KEY0_REG			0x1D040
+#define CRYPTO_AUTH_KEY1_REG			0x1D044
+#define CRYPTO_AUTH_KEY2_REG			0x1D048
+#define CRYPTO_AUTH_KEY3_REG			0x1D04C
+#define CRYPTO_AUTH_KEY4_REG			0x1D050
+#define CRYPTO_AUTH_KEY5_REG			0x1D054
+#define CRYPTO_AUTH_KEY6_REG			0x1D058
+#define CRYPTO_AUTH_KEY7_REG			0x1D05C
+#define CRYPTO_AUTH_KEY8_REG			0x1D060
+#define CRYPTO_AUTH_KEY9_REG			0x1D064
+#define CRYPTO_AUTH_KEY10_REG			0x1D068
+#define CRYPTO_AUTH_KEY11_REG			0x1D06C
+#define CRYPTO_AUTH_KEY12_REG			0x1D070
+#define CRYPTO_AUTH_KEY13_REG			0x1D074
+#define CRYPTO_AUTH_KEY14_REG			0x1D078
+#define CRYPTO_AUTH_KEY15_REG			0x1D07C
+
+#define CRYPTO_AUTH_PIPE0_KEY0_REG		0x1E800
+#define CRYPTO_AUTH_PIPE0_KEY1_REG		0x1E804
+#define CRYPTO_AUTH_PIPE0_KEY2_REG		0x1E808
+#define CRYPTO_AUTH_PIPE0_KEY3_REG		0x1E80C
+#define CRYPTO_AUTH_PIPE0_KEY4_REG		0x1E810
+#define CRYPTO_AUTH_PIPE0_KEY5_REG		0x1E814
+#define CRYPTO_AUTH_PIPE0_KEY6_REG		0x1E818
+#define CRYPTO_AUTH_PIPE0_KEY7_REG		0x1E81C
+#define CRYPTO_AUTH_PIPE0_KEY8_REG		0x1E820
+#define CRYPTO_AUTH_PIPE0_KEY9_REG		0x1E824
+#define CRYPTO_AUTH_PIPE0_KEY10_REG		0x1E828
+#define CRYPTO_AUTH_PIPE0_KEY11_REG		0x1E82C
+#define CRYPTO_AUTH_PIPE0_KEY12_REG		0x1E830
+#define CRYPTO_AUTH_PIPE0_KEY13_REG		0x1E834
+#define CRYPTO_AUTH_PIPE0_KEY14_REG		0x1E838
+#define CRYPTO_AUTH_PIPE0_KEY15_REG		0x1E83C
+
+#define CRYPTO_AUTH_PIPE1_KEY0_REG		0x1E880
+#define CRYPTO_AUTH_PIPE1_KEY1_REG		0x1E884
+#define CRYPTO_AUTH_PIPE1_KEY2_REG		0x1E888
+#define CRYPTO_AUTH_PIPE1_KEY3_REG		0x1E88C
+#define CRYPTO_AUTH_PIPE1_KEY4_REG		0x1E890
+#define CRYPTO_AUTH_PIPE1_KEY5_REG		0x1E894
+#define CRYPTO_AUTH_PIPE1_KEY6_REG		0x1E898
+#define CRYPTO_AUTH_PIPE1_KEY7_REG		0x1E89C
+#define CRYPTO_AUTH_PIPE1_KEY8_REG		0x1E8A0
+#define CRYPTO_AUTH_PIPE1_KEY9_REG		0x1E8A4
+#define CRYPTO_AUTH_PIPE1_KEY10_REG		0x1E8A8
+#define CRYPTO_AUTH_PIPE1_KEY11_REG		0x1E8AC
+#define CRYPTO_AUTH_PIPE1_KEY12_REG		0x1E8B0
+#define CRYPTO_AUTH_PIPE1_KEY13_REG		0x1E8B4
+#define CRYPTO_AUTH_PIPE1_KEY14_REG		0x1E8B8
+#define CRYPTO_AUTH_PIPE1_KEY15_REG		0x1E8BC
+
+#define CRYPTO_AUTH_PIPE2_KEY0_REG		0x1E900
+#define CRYPTO_AUTH_PIPE2_KEY1_REG		0x1E904
+#define CRYPTO_AUTH_PIPE2_KEY2_REG		0x1E908
+#define CRYPTO_AUTH_PIPE2_KEY3_REG		0x1E90C
+#define CRYPTO_AUTH_PIPE2_KEY4_REG		0x1E910
+#define CRYPTO_AUTH_PIPE2_KEY5_REG		0x1E914
+#define CRYPTO_AUTH_PIPE2_KEY6_REG		0x1E918
+#define CRYPTO_AUTH_PIPE2_KEY7_REG		0x1E91C
+#define CRYPTO_AUTH_PIPE2_KEY8_REG		0x1E920
+#define CRYPTO_AUTH_PIPE2_KEY9_REG		0x1E924
+#define CRYPTO_AUTH_PIPE2_KEY10_REG		0x1E928
+#define CRYPTO_AUTH_PIPE2_KEY11_REG		0x1E92C
+#define CRYPTO_AUTH_PIPE2_KEY12_REG		0x1E930
+#define CRYPTO_AUTH_PIPE2_KEY13_REG		0x1E934
+#define CRYPTO_AUTH_PIPE2_KEY14_REG		0x1E938
+#define CRYPTO_AUTH_PIPE2_KEY15_REG		0x1E93C
+
+#define CRYPTO_AUTH_PIPE3_KEY0_REG		0x1E980
+#define CRYPTO_AUTH_PIPE3_KEY1_REG		0x1E984
+#define CRYPTO_AUTH_PIPE3_KEY2_REG		0x1E988
+#define CRYPTO_AUTH_PIPE3_KEY3_REG		0x1E98C
+#define CRYPTO_AUTH_PIPE3_KEY4_REG		0x1E990
+#define CRYPTO_AUTH_PIPE3_KEY5_REG		0x1E994
+#define CRYPTO_AUTH_PIPE3_KEY6_REG		0x1E998
+#define CRYPTO_AUTH_PIPE3_KEY7_REG		0x1E99C
+#define CRYPTO_AUTH_PIPE3_KEY8_REG		0x1E9A0
+#define CRYPTO_AUTH_PIPE3_KEY9_REG		0x1E9A4
+#define CRYPTO_AUTH_PIPE3_KEY10_REG		0x1E9A8
+#define CRYPTO_AUTH_PIPE3_KEY11_REG		0x1E9AC
+#define CRYPTO_AUTH_PIPE3_KEY12_REG		0x1E9B0
+#define CRYPTO_AUTH_PIPE3_KEY13_REG		0x1E9B4
+#define CRYPTO_AUTH_PIPE3_KEY14_REG		0x1E9B8
+#define CRYPTO_AUTH_PIPE3_KEY15_REG		0x1E9BC
+
+
+#define CRYPTO_AUTH_IV0_REG			0x1A310
+#define CRYPTO_AUTH_IV1_REG			0x1A314
+#define CRYPTO_AUTH_IV2_REG			0x1A318
+#define CRYPTO_AUTH_IV3_REG			0x1A31C
+#define CRYPTO_AUTH_IV4_REG			0x1A320
+#define CRYPTO_AUTH_IV5_REG			0x1A324
+#define CRYPTO_AUTH_IV6_REG			0x1A328
+#define CRYPTO_AUTH_IV7_REG			0x1A32C
+#define CRYPTO_AUTH_IV8_REG			0x1A330
+#define CRYPTO_AUTH_IV9_REG			0x1A334
+#define CRYPTO_AUTH_IV10_REG			0x1A338
+#define CRYPTO_AUTH_IV11_REG			0x1A33C
+#define CRYPTO_AUTH_IV12_REG			0x1A340
+#define CRYPTO_AUTH_IV13_REG			0x1A344
+#define CRYPTO_AUTH_IV14_REG			0x1A348
+#define CRYPTO_AUTH_IV15_REG			0x1A34C
+
+#define CRYPTO_AUTH_INFO_NONCE0_REG		0x1A350
+#define CRYPTO_AUTH_INFO_NONCE1_REG		0x1A354
+#define CRYPTO_AUTH_INFO_NONCE2_REG		0x1A358
+#define CRYPTO_AUTH_INFO_NONCE3_REG		0x1A35C
+
+#define CRYPTO_AUTH_BYTECNT0_REG		0x1A390
+#define CRYPTO_AUTH_BYTECNT1_REG		0x1A394
+#define CRYPTO_AUTH_BYTECNT2_REG		0x1A398
+#define CRYPTO_AUTH_BYTECNT3_REG		0x1A39C
+
+#define CRYPTO_AUTH_EXP_MAC0_REG		0x1A3A0
+#define CRYPTO_AUTH_EXP_MAC1_REG		0x1A3A4
+#define CRYPTO_AUTH_EXP_MAC2_REG		0x1A3A8
+#define CRYPTO_AUTH_EXP_MAC3_REG		0x1A3AC
+#define CRYPTO_AUTH_EXP_MAC4_REG		0x1A3B0
+#define CRYPTO_AUTH_EXP_MAC5_REG		0x1A3B4
+#define CRYPTO_AUTH_EXP_MAC6_REG		0x1A3B8
+#define CRYPTO_AUTH_EXP_MAC7_REG		0x1A3BC
+
+#define CRYPTO_CONFIG_REG			0x1A400
+#define CRYPTO_DEBUG_ENABLE_REG			0x1AF00
+#define CRYPTO_DEBUG_REG			0x1AF04
+
+
+
+/* Register bits */
+#define CRYPTO_CORE_STEP_REV_MASK		0xFFFF
+#define CRYPTO_CORE_STEP_REV			0 /* bit 15-0 */
+#define CRYPTO_CORE_MAJOR_REV_MASK		0xFF000000
+#define CRYPTO_CORE_MAJOR_REV			24 /* bit 31-24 */
+#define CRYPTO_CORE_MINOR_REV_MASK		0xFF0000
+#define CRYPTO_CORE_MINOR_REV			16 /* bit 23-16 */
+
+/* status reg  */
+#define CRYPTO_MAC_FAILED			31
+#define CRYPTO_DOUT_SIZE_AVAIL			26 /* bit 30-26 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL			21 /* bit 25-21 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK		(0x1F << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_HSD_ERR				20
+#define CRYPTO_ACCESS_VIOL			19
+#define CRYPTO_PIPE_ACTIVE_ERR			18
+#define CRYPTO_CFG_CHNG_ERR			17
+#define CRYPTO_DOUT_ERR				16
+#define CRYPTO_DIN_ERR				15
+#define CRYPTO_AXI_ERR				14
+#define CRYPTO_CRYPTO_STATE			10 /* bit 13-10 */
+#define CRYPTO_CRYPTO_STATE_MASK		(0xF << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY			9
+#define CRYPTO_AUTH_BUSY			8
+#define CRYPTO_DOUT_INTR			7
+#define CRYPTO_DIN_INTR				6
+#define CRYPTO_OP_DONE_INTR			5
+#define CRYPTO_ERR_INTR				4
+#define CRYPTO_DOUT_RDY				3
+#define CRYPTO_DIN_RDY				2
+#define CRYPTO_OPERATION_DONE			1
+#define CRYPTO_SW_ERR				0
+
+/* status2 reg  */
+#define CRYPTO_AXI_EXTRA			1
+#define CRYPTO_LOCKED				2
+
+/* config reg */
+#define CRYPTO_REQ_SIZE				17 /* bit 20-17 */
+#define CRYPTO_REQ_SIZE_MASK			(0xF << CRYPTO_REQ_SIZE)
+#define CRYPTO_REQ_SIZE_ENUM_1_BEAT	0
+#define CRYPTO_REQ_SIZE_ENUM_2_BEAT	1
+#define CRYPTO_REQ_SIZE_ENUM_3_BEAT	2
+#define CRYPTO_REQ_SIZE_ENUM_4_BEAT	3
+#define CRYPTO_REQ_SIZE_ENUM_5_BEAT	4
+#define CRYPTO_REQ_SIZE_ENUM_6_BEAT	5
+#define CRYPTO_REQ_SIZE_ENUM_7_BEAT	6
+#define CRYPTO_REQ_SIZE_ENUM_8_BEAT	7
+#define CRYPTO_REQ_SIZE_ENUM_9_BEAT	8
+#define CRYPTO_REQ_SIZE_ENUM_10_BEAT	9
+#define CRYPTO_REQ_SIZE_ENUM_11_BEAT	10
+#define CRYPTO_REQ_SIZE_ENUM_12_BEAT	11
+#define CRYPTO_REQ_SIZE_ENUM_13_BEAT	12
+#define CRYPTO_REQ_SIZE_ENUM_14_BEAT	13
+#define CRYPTO_REQ_SIZE_ENUM_15_BEAT	14
+#define CRYPTO_REQ_SIZE_ENUM_16_BEAT	15
+
+#define CRYPTO_MAX_QUEUED_REQ			14 /* bit 16-14 */
+#define CRYPTO_MAX_QUEUED_REQ_MASK		(0x7 << CRYPTO_MAX_QUEUED_REQ)
+#define CRYPTO_ENUM_1_QUEUED_REQS	0
+#define CRYPTO_ENUM_2_QUEUED_REQS	1
+#define CRYPTO_ENUM_3_QUEUED_REQS	2
+
+#define CRYPTO_IRQ_ENABLES			10	/* bit 13-10 */
+#define CRYPTO_IRQ_ENABLES_MASK			(0xF << CRYPTO_IRQ_ENABLES)
+
+#define CRYPTO_LITTLE_ENDIAN_MODE		9
+#define CRYPTO_LITTLE_ENDIAN_MASK		(1 << CRYPTO_LITTLE_ENDIAN_MODE)
+#define CRYPTO_PIPE_SET_SELECT			5 /* bit 8-5 */
+#define CRYPTO_PIPE_SET_SELECT_MASK		(0xF << CRYPTO_PIPE_SET_SELECT)
+
+#define CRYPTO_HIGH_SPD_EN_N			4
+
+#define CRYPTO_MASK_DOUT_INTR			3
+#define CRYPTO_MASK_DIN_INTR			2
+#define CRYPTO_MASK_OP_DONE_INTR		1
+#define CRYPTO_MASK_ERR_INTR			0
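+
+/*
+ * Illustrative composition of a CRYPTO_CONFIG_REG value from the fields
+ * above (the actual programming sequence lives in the qce driver, not in
+ * this header):
+ *
+ *	val = (CRYPTO_REQ_SIZE_ENUM_16_BEAT << CRYPTO_REQ_SIZE) |
+ *	      (CRYPTO_ENUM_2_QUEUED_REQS << CRYPTO_MAX_QUEUED_REQ) |
+ *	      (1 << CRYPTO_LITTLE_ENDIAN_MODE) |
+ *	      (1 << CRYPTO_MASK_DOUT_INTR) |
+ *	      (1 << CRYPTO_MASK_DIN_INTR);
+ */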
+
+/* auth_seg_cfg reg */
+#define CRYPTO_COMP_EXP_MAC			24
+#define CRYPTO_COMP_EXP_MAC_DISABLED		0
+#define CRYPTO_COMP_EXP_MAC_ENABLED		1
+
+#define CRYPTO_F9_DIRECTION			23
+#define CRYPTO_F9_DIRECTION_UPLINK		0
+#define CRYPTO_F9_DIRECTION_DOWNLINK		1
+
+#define CRYPTO_AUTH_NONCE_NUM_WORDS		20 /* bit 22-20 */
+#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \
+				(0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS)
+
+#define CRYPTO_USE_PIPE_KEY_AUTH		19
+#define CRYPTO_USE_HW_KEY_AUTH			18
+#define CRYPTO_FIRST				17
+#define CRYPTO_LAST				16
+
+#define CRYPTO_AUTH_POS				14 /* bit 15 .. 14*/
+#define CRYPTO_AUTH_POS_MASK			(0x3 << CRYPTO_AUTH_POS)
+#define CRYPTO_AUTH_POS_BEFORE			0
+#define CRYPTO_AUTH_POS_AFTER			1
+
+#define CRYPTO_AUTH_SIZE			9 /* bits 13 .. 9*/
+#define CRYPTO_AUTH_SIZE_MASK			(0x1F << CRYPTO_AUTH_SIZE)
+#define CRYPTO_AUTH_SIZE_SHA1		0
+#define CRYPTO_AUTH_SIZE_SHA256		1
+#define CRYPTO_AUTH_SIZE_ENUM_1_BYTES	0
+#define CRYPTO_AUTH_SIZE_ENUM_2_BYTES	1
+#define CRYPTO_AUTH_SIZE_ENUM_3_BYTES	2
+#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES	3
+#define CRYPTO_AUTH_SIZE_ENUM_5_BYTES	4
+#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES	5
+#define CRYPTO_AUTH_SIZE_ENUM_7_BYTES	6
+#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES	7
+#define CRYPTO_AUTH_SIZE_ENUM_9_BYTES	8
+#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES	9
+#define CRYPTO_AUTH_SIZE_ENUM_11_BYTES	10
+#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES	11
+#define CRYPTO_AUTH_SIZE_ENUM_13_BYTES	12
+#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES	13
+#define CRYPTO_AUTH_SIZE_ENUM_15_BYTES	14
+#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES	15
+
+
+#define CRYPTO_AUTH_MODE			6 /* bit 8 .. 6*/
+#define CRYPTO_AUTH_MODE_MASK			(0x7 << CRYPTO_AUTH_MODE)
+#define CRYPTO_AUTH_MODE_HASH	0
+#define CRYPTO_AUTH_MODE_HMAC	1
+#define CRYPTO_AUTH_MODE_CCM	0
+#define CRYPTO_AUTH_MODE_CMAC	1
+
+#define CRYPTO_AUTH_KEY_SIZE			3  /* bit 5 .. 3*/
+#define CRYPTO_AUTH_KEY_SIZE_MASK		(0x7 << CRYPTO_AUTH_KEY_SIZE)
+#define CRYPTO_AUTH_KEY_SZ_AES128	0
+#define CRYPTO_AUTH_KEY_SZ_AES256	2
+
+#define CRYPTO_AUTH_ALG				0 /* bit 2 .. 0*/
+#define CRYPTO_AUTH_ALG_MASK			7
+#define CRYPTO_AUTH_ALG_NONE	0
+#define CRYPTO_AUTH_ALG_SHA	1
+#define CRYPTO_AUTH_ALG_AES	2
+#define CRYPTO_AUTH_ALG_KASUMI	3
+#define CRYPTO_AUTH_ALG_SNOW3G	4
+#define CRYPTO_AUTH_ALG_ZUC	5
+
+/* encr_xts_du_size reg */
+#define CRYPTO_ENCR_XTS_DU_SIZE			0 /* bit 19-0  */
+#define CRYPTO_ENCR_XTS_DU_SIZE_MASK		0xfffff
+
+/* encr_seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE		17 /* bit 17 */
+#define CRYPTO_F8_KEYSTREAM_DISABLED	0
+#define CRYPTO_F8_KEYSTREAM_ENABLED	1
+
+#define CRYPTO_F8_DIRECTION			16 /* bit */
+#define CRYPTO_F8_DIRECTION_UPLINK	0
+#define CRYPTO_F8_DIRECTION_DOWNLINK	1
+
+
+#define CRYPTO_USE_PIPE_KEY_ENCR		15 /* bit */
+#define CRYPTO_USE_PIPE_KEY_ENCR_ENABLED	1
+#define CRYPTO_USE_KEY_REGISTERS		0
+
+
+#define CRYPTO_USE_HW_KEY_ENCR			14
+#define CRYPTO_USE_KEY_REG	0
+#define CRYPTO_USE_HW_KEY	1
+
+#define CRYPTO_LAST_CCM				13
+#define CRYPTO_LAST_CCM_XFR	1
+#define CRYPTO_INTERM_CCM_XFR	0
+
+
+#define CRYPTO_CNTR_ALG				11 /* bit 12-11 */
+#define CRYPTO_CNTR_ALG_MASK			(3 << CRYPTO_CNTR_ALG)
+#define CRYPTO_CNTR_ALG_NIST	0
+
+#define CRYPTO_ENCODE				10
+
+#define CRYPTO_ENCR_MODE			6 /* bit 9-6 */
+#define CRYPTO_ENCR_MODE_MASK			(0xF << CRYPTO_ENCR_MODE)
+/* only valid when AES */
+#define CRYPTO_ENCR_MODE_ECB	0
+#define CRYPTO_ENCR_MODE_CBC	1
+#define CRYPTO_ENCR_MODE_CTR	2
+#define CRYPTO_ENCR_MODE_XTS	3
+#define CRYPTO_ENCR_MODE_CCM	4
+
+#define CRYPTO_ENCR_KEY_SZ			3 /* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK			(7 << CRYPTO_ENCR_KEY_SZ)
+#define CRYPTO_ENCR_KEY_SZ_DES		0
+#define CRYPTO_ENCR_KEY_SZ_3DES		1
+#define CRYPTO_ENCR_KEY_SZ_AES128	0
+#define CRYPTO_ENCR_KEY_SZ_AES256	2
+
+#define CRYPTO_ENCR_ALG				0 /* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK			(7 << CRYPTO_ENCR_ALG)
+#define CRYPTO_ENCR_ALG_NONE		0
+#define CRYPTO_ENCR_ALG_DES		1
+#define CRYPTO_ENCR_ALG_AES		2
+#define CRYPTO_ENCR_ALG_KASUMI		4
+#define CRYPTO_ENCR_ALG_SNOW_3G		5
+#define CRYPTO_ENCR_ALG_ZUC		6
+
+/* goproc reg */
+#define CRYPTO_GO				0
+#define CRYPTO_CLR_CNTXT			1
+#define CRYPTO_RESULTS_DUMP			2
+
+/*  F8 definition of CRYPTO_ENCR_CNTR1_IV1 REG  */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT		16	/* bit 31 - 16 */
+#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \
+		(0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT)
+
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER		0	/* bit 4 - 0 */
+#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \
+		(0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER)
+
+/* F9 definition of CRYPTO_AUTH_IV4 REG */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS	0	/* bit 2 - 0 */
+#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \
+		(0x7  << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS)
+
+/* engines_avail */
+#define CRYPTO_ENCR_AES_SEL			0
+#define CRYPTO_DES_SEL				1
+#define CRYPTO_ENCR_SNOW3G_SEL			2
+#define CRYPTO_ENCR_KASUMI_SEL			3
+#define CRYPTO_SHA_SEL				4
+#define CRYPTO_SHA512_SEL			5
+#define CRYPTO_AUTH_AES_SEL			6
+#define CRYPTO_AUTH_SNOW3G_SEL			7
+#define CRYPTO_AUTH_KASUMI_SEL			8
+#define CRYPTO_BAM_PIPE_SETS			9	/* bit 12 - 9 */
+#define CRYPTO_AXI_WR_BEATS			13	/* bit 18 - 13 */
+#define CRYPTO_AXI_RD_BEATS			19	/* bit 24 - 19 */
+#define CRYPTO_ENCR_ZUC_SEL			26
+#define CRYPTO_AUTH_ZUC_SEL			27
+#define CRYPTO_ZUC_ENABLE			28
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_50_H_ */
diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig
index bca172d..8e2eb35 100644
--- a/drivers/firmware/Kconfig
+++ b/drivers/firmware/Kconfig
@@ -210,5 +210,6 @@
 source "drivers/firmware/google/Kconfig"
 source "drivers/firmware/efi/Kconfig"
 source "drivers/firmware/meson/Kconfig"
+source "drivers/firmware/qcom/Kconfig"
 
 endmenu
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 898ac41..b1c1b21 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -26,3 +26,4 @@
 obj-$(CONFIG_GOOGLE_FIRMWARE)	+= google/
 obj-$(CONFIG_EFI)		+= efi/
 obj-$(CONFIG_UEFI_CPER)		+= efi/
+obj-$(CONFIG_MSM_TZ_LOG)	+= qcom/
diff --git a/drivers/firmware/qcom/Kconfig b/drivers/firmware/qcom/Kconfig
new file mode 100644
index 0000000..61c7974
--- /dev/null
+++ b/drivers/firmware/qcom/Kconfig
@@ -0,0 +1,7 @@
+config MSM_TZ_LOG
+	tristate "MSM Trust Zone (TZ) Log Driver"
+	depends on DEBUG_FS
+	help
+	  This option enables a driver with a debugfs interface for messages
+	  produced by the Secure code (Trust zone). These messages provide
+	  diagnostic information about TZ operation.
diff --git a/drivers/firmware/qcom/Makefile b/drivers/firmware/qcom/Makefile
new file mode 100644
index 0000000..635f60c
--- /dev/null
+++ b/drivers/firmware/qcom/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_MSM_TZ_LOG) += tz_log.o
diff --git a/drivers/firmware/qcom/tz_log.c b/drivers/firmware/qcom/tz_log.c
new file mode 100644
index 0000000..1b51d08
--- /dev/null
+++ b/drivers/firmware/qcom/tz_log.c
@@ -0,0 +1,1209 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/of.h>
+
+#include <soc/qcom/scm.h>
+#include <soc/qcom/qseecomi.h>
+
+/* QSEE_LOG_BUF_SIZE = 32K */
+#define QSEE_LOG_BUF_SIZE 0x8000
+
+
+/* TZ Diagnostic Area legacy version number */
+#define TZBSP_DIAG_MAJOR_VERSION_LEGACY	2
+/*
+ * Preprocessor Definitions and Constants
+ */
+#define TZBSP_MAX_CPU_COUNT 0x08
+/*
+ * Number of VMID Tables
+ */
+#define TZBSP_DIAG_NUM_OF_VMID 16
+/*
+ * VMID Description length
+ */
+#define TZBSP_DIAG_VMID_DESC_LEN 7
+/*
+ * Number of Interrupts
+ */
+#define TZBSP_DIAG_INT_NUM  32
+/*
+ * Length of descriptive name associated with Interrupt
+ */
+#define TZBSP_MAX_INT_DESC 16
+/*
+ * TZ 3.X version info
+ */
+#define QSEE_VERSION_TZ_3_X 0x800000
+/*
+ * TZ 4.X version info
+ */
+#define QSEE_VERSION_TZ_4_X 0x1000000
+
+#define TZBSP_AES_256_ENCRYPTED_KEY_SIZE 256
+#define TZBSP_NONCE_LEN 12
+#define TZBSP_TAG_LEN 16
+
+/*
+ * VMID Table
+ */
+struct tzdbg_vmid_t {
+	uint8_t vmid; /* Virtual Machine Identifier */
+	uint8_t desc[TZBSP_DIAG_VMID_DESC_LEN];	/* ASCII Text */
+};
+/*
+ * Boot Info Table
+ */
+struct tzdbg_boot_info_t {
+	uint32_t wb_entry_cnt;	/* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;	/* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;	/* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;	/* Power Collapse exit CPU counter */
+	uint32_t warm_jmp_addr;	/* Last Warmboot Jump Address */
+	uint32_t spare;	/* Reserved for future use. */
+};
+/*
+ * Boot Info Table for 64-bit
+ */
+struct tzdbg_boot_info64_t {
+	uint32_t wb_entry_cnt;  /* Warmboot entry CPU Counter */
+	uint32_t wb_exit_cnt;   /* Warmboot exit CPU Counter */
+	uint32_t pc_entry_cnt;  /* Power Collapse entry CPU Counter */
+	uint32_t pc_exit_cnt;   /* Power Collapse exit CPU counter */
+	uint32_t psci_entry_cnt;/* PSCI syscall entry CPU Counter */
+	uint32_t psci_exit_cnt;   /* PSCI syscall exit CPU Counter */
+	uint64_t warm_jmp_addr; /* Last Warmboot Jump Address */
+	uint32_t warm_jmp_instr; /* Last Warmboot Jump Address Instruction */
+};
+/*
+ * Reset Info Table
+ */
+struct tzdbg_reset_info_t {
+	uint32_t reset_type;	/* Reset Reason */
+	uint32_t reset_cnt;	/* Number of resets that occurred, per CPU */
+};
+/*
+ * Interrupt Info Table
+ */
+struct tzdbg_int_t {
+	/*
+	 * Type of Interrupt/exception
+	 */
+	uint16_t int_info;
+	/*
+	 * Availability of the slot
+	 */
+	uint8_t avail;
+	/*
+	 * Reserved for future use
+	 */
+	uint8_t spare;
+	/*
+	 * Interrupt # for IRQ and FIQ
+	 */
+	uint32_t int_num;
+	/*
+	 * ASCII text describing the type of interrupt, e.g.
+	 * Secure Timer, EBI XPU. This string is always null-terminated
+	 * and holds at most TZBSP_MAX_INT_DESC characters; any additional
+	 * characters are truncated.
+	 */
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint64_t int_count[TZBSP_MAX_CPU_COUNT]; /* # of times seen per CPU */
+};
+
+/*
+ * Interrupt Info Table used in tz version >=4.X
+ */
+struct tzdbg_int_t_tz40 {
+	uint16_t int_info;
+	uint8_t avail;
+	uint8_t spare;
+	uint32_t int_num;
+	uint8_t int_desc[TZBSP_MAX_INT_DESC];
+	uint32_t int_count[TZBSP_MAX_CPU_COUNT]; /* uint32_t in TZ ver >= 4.x*/
+};
+
+/* warm boot reason for cores */
+struct tzbsp_diag_wakeup_info_t {
+	/* Wake source info : APCS_GICC_HPPIR */
+	uint32_t HPPIR;
+	/* Wake source info : APCS_GICC_AHPPIR */
+	uint32_t AHPPIR;
+};
+
+/*
+ * Log ring buffer position
+ */
+struct tzdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+/*
+ * Log ring buffer
+ */
+struct tzdbg_log_t {
+	struct tzdbg_log_pos_t	log_pos;
+	/* open-ended array to the end of the 4K IMEM buffer */
+	uint8_t			log_buf[];
+};
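+
+/*
+ * log_pos.offset is the writer's next byte within log_buf[] and
+ * log_pos.wrap counts how many times the writer has rolled over; readers
+ * keep a private tzdbg_log_pos_t and compare wrap counts to detect being
+ * lapped (see _disp_log_stats() below).
+ */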
+
+/*
+ * Diagnostic Table
+ * Note: this is the reference layout of the TZ diagnostic table, sized for
+ * TZBSP_MAX_CPU_COUNT CPUs; the real diagnostic data is copied into the
+ * buffer directly from I/O memory.
+ */
+struct tzdbg_t {
+	uint32_t magic_num;
+	uint32_t version;
+	/*
+	 * Number of CPU's
+	 */
+	uint32_t cpu_count;
+	/*
+	 * Offset of VMID Table
+	 */
+	uint32_t vmid_info_off;
+	/*
+	 * Offset of Boot Table
+	 */
+	uint32_t boot_info_off;
+	/*
+	 * Offset of Reset info Table
+	 */
+	uint32_t reset_info_off;
+	/*
+	 * Offset of Interrupt info Table
+	 */
+	uint32_t int_info_off;
+	/*
+	 * Ring Buffer Offset
+	 */
+	uint32_t ring_off;
+	/*
+	 * Ring Buffer Length
+	 */
+	uint32_t ring_len;
+
+	/* Offset for Wakeup info */
+	uint32_t wakeup_info_off;
+
+	/*
+	 * VMID to EE Mapping
+	 */
+	struct tzdbg_vmid_t vmid_info[TZBSP_DIAG_NUM_OF_VMID];
+	/*
+	 * Boot Info
+	 */
+	struct tzdbg_boot_info_t  boot_info[TZBSP_MAX_CPU_COUNT];
+	/*
+	 * Reset Info
+	 */
+	struct tzdbg_reset_info_t reset_info[TZBSP_MAX_CPU_COUNT];
+	uint32_t num_interrupts;
+	struct tzdbg_int_t  int_info[TZBSP_DIAG_INT_NUM];
+
+	/* Wake up info */
+	struct tzbsp_diag_wakeup_info_t  wakeup_info[TZBSP_MAX_CPU_COUNT];
+
+	uint8_t key[TZBSP_AES_256_ENCRYPTED_KEY_SIZE];
+
+	uint8_t nonce[TZBSP_NONCE_LEN];
+
+	uint8_t tag[TZBSP_TAG_LEN];
+
+	/*
+	 * We need at least 2K for the ring buffer
+	 */
+	struct tzdbg_log_t ring_buffer;	/* TZ Ring Buffer */
+};
+
+struct hypdbg_log_pos_t {
+	uint16_t wrap;
+	uint16_t offset;
+};
+
+struct hypdbg_boot_info_t {
+	uint32_t warm_entry_cnt;
+	uint32_t warm_exit_cnt;
+};
+
+struct hypdbg_t {
+	/* Magic Number */
+	uint32_t magic_num;
+
+	/* Number of CPU's */
+	uint32_t cpu_count;
+
+	/* Ring Buffer Offset */
+	uint32_t ring_off;
+
+	/* Ring buffer position mgmt */
+	struct hypdbg_log_pos_t log_pos;
+	uint32_t log_len;
+
+	/* S2 fault numbers */
+	uint32_t s2_fault_counter;
+
+	/* Boot Info */
+	struct hypdbg_boot_info_t boot_info[TZBSP_MAX_CPU_COUNT];
+
+	/* Ring buffer pointer */
+	uint8_t log_buf_p[];
+};
+
+/*
+ * Enumeration order for VMID's
+ */
+enum tzdbg_stats_type {
+	TZDBG_BOOT = 0,
+	TZDBG_RESET,
+	TZDBG_INTERRUPT,
+	TZDBG_VMID,
+	TZDBG_GENERAL,
+	TZDBG_LOG,
+	TZDBG_QSEE_LOG,
+	TZDBG_HYP_GENERAL,
+	TZDBG_HYP_LOG,
+	TZDBG_STATS_MAX
+};
+
+struct tzdbg_stat {
+	char *name;
+	char *data;
+};
+
+struct tzdbg {
+	void __iomem *virt_iobase;
+	void __iomem *hyp_virt_iobase;
+	struct tzdbg_t *diag_buf;
+	struct hypdbg_t *hyp_diag_buf;
+	char *disp_buf;
+	int debug_tz[TZDBG_STATS_MAX];
+	struct tzdbg_stat stat[TZDBG_STATS_MAX];
+	uint32_t hyp_debug_rw_buf_size;
+	bool is_hyplog_enabled;
+	uint32_t tz_version;
+};
+
+static struct tzdbg tzdbg = {
+	.stat[TZDBG_BOOT].name = "boot",
+	.stat[TZDBG_RESET].name = "reset",
+	.stat[TZDBG_INTERRUPT].name = "interrupt",
+	.stat[TZDBG_VMID].name = "vmid",
+	.stat[TZDBG_GENERAL].name = "general",
+	.stat[TZDBG_LOG].name = "log",
+	.stat[TZDBG_QSEE_LOG].name = "qsee_log",
+	.stat[TZDBG_HYP_GENERAL].name = "hyp_general",
+	.stat[TZDBG_HYP_LOG].name = "hyp_log",
+};
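+
+/*
+ * Each stat[] entry backs one debugfs node of the same name: the _disp_*
+ * helpers below format the corresponding records into tzdbg.disp_buf and
+ * point .data at it before tzdbgfs_read() copies the result to user space.
+ */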
+
+static struct tzdbg_log_t *g_qsee_log;
+static uint32_t debug_rw_buf_size;
+
+/*
+ * Debugfs data structure and functions
+ */
+
+static int _disp_tz_general_stats(void)
+{
+	int len = 0;
+
+	len += snprintf(tzdbg.disp_buf + len, debug_rw_buf_size - 1,
+			"   Version        : 0x%x\n"
+			"   Magic Number   : 0x%x\n"
+			"   Number of CPU  : %d\n",
+			tzdbg.diag_buf->version,
+			tzdbg.diag_buf->magic_num,
+			tzdbg.diag_buf->cpu_count);
+	tzdbg.stat[TZDBG_GENERAL].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_vmid_stats(void)
+{
+	int i, num_vmid;
+	int len = 0;
+	struct tzdbg_vmid_t *ptr;
+
+	ptr = (struct tzdbg_vmid_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->vmid_info_off);
+	num_vmid = ((tzdbg.diag_buf->boot_info_off -
+				tzdbg.diag_buf->vmid_info_off)/
+					(sizeof(struct tzdbg_vmid_t)));
+
+	for (i = 0; i < num_vmid; i++) {
+		if (ptr->vmid < 0xFF) {
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"   0x%x        %s\n",
+				(uint32_t)ptr->vmid, (uint8_t *)ptr->desc);
+		}
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_VMID].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_boot_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_boot_info_t *ptr = NULL;
+	struct tzdbg_boot_info64_t *ptr_64 = NULL;
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+	if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+		ptr_64 = (struct tzdbg_boot_info64_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	} else {
+		ptr = (struct tzdbg_boot_info_t *)((unsigned char *)
+			tzdbg.diag_buf + tzdbg.diag_buf->boot_info_off);
+	}
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		if (tzdbg.tz_version >= QSEE_VERSION_TZ_3_X) {
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address : 0x%llx\n"
+					"     Warmboot entry CPU counter : 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter : 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n"
+					"     Psci entry CPU counter : 0x%x\n"
+					"     Psci exit CPU counter : 0x%x\n"
+					"     Warmboot Jump Address Instruction : 0x%x\n",
+					i, (uint64_t)ptr_64->warm_jmp_addr,
+					ptr_64->wb_entry_cnt,
+					ptr_64->wb_exit_cnt,
+					ptr_64->pc_entry_cnt,
+					ptr_64->pc_exit_cnt,
+					ptr_64->psci_entry_cnt,
+					ptr_64->psci_exit_cnt,
+					ptr_64->warm_jmp_instr);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr_64++;
+		} else {
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len,
+					"  CPU #: %d\n"
+					"     Warmboot jump address     : 0x%x\n"
+					"     Warmboot entry CPU counter: 0x%x\n"
+					"     Warmboot exit CPU counter : 0x%x\n"
+					"     Power Collapse entry CPU counter: 0x%x\n"
+					"     Power Collapse exit CPU counter : 0x%x\n",
+					i, ptr->warm_jmp_addr,
+					ptr->wb_entry_cnt,
+					ptr->wb_exit_cnt,
+					ptr->pc_entry_cnt,
+					ptr->pc_exit_cnt);
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into the buffer\n",
+						__func__);
+				break;
+			}
+			ptr++;
+		}
+	}
+	tzdbg.stat[TZDBG_BOOT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_reset_stats(void)
+{
+	int i;
+	int len = 0;
+	struct tzdbg_reset_info_t *ptr;
+
+	ptr = (struct tzdbg_reset_info_t *)((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->reset_info_off);
+
+	for (i = 0; i < tzdbg.diag_buf->cpu_count; i++) {
+		len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Reset Type (reason)       : 0x%x\n"
+				"     Reset counter             : 0x%x\n",
+				i, ptr->reset_type, ptr->reset_cnt);
+
+		if (len > (debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+
+		ptr++;
+	}
+	tzdbg.stat[TZDBG_RESET].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_interrupt_stats(void)
+{
+	int i, j, int_info_size;
+	int len = 0;
+	uint32_t *num_int;
+	unsigned char *ptr;
+	struct tzdbg_int_t *tzdbg_ptr;
+	struct tzdbg_int_t_tz40 *tzdbg_ptr_tz40;
+
+	num_int = (uint32_t *)((unsigned char *)tzdbg.diag_buf +
+			(tzdbg.diag_buf->int_info_off - sizeof(uint32_t)));
+	ptr = ((unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->int_info_off);
+	int_info_size = ((tzdbg.diag_buf->ring_off -
+				tzdbg.diag_buf->int_info_off)/(*num_int));
+
+	pr_info("qsee_version = 0x%x\n", tzdbg.tz_version);
+
+	if (tzdbg.tz_version < QSEE_VERSION_TZ_4_X) {
+		for (i = 0; i < (*num_int); i++) {
+			tzdbg_ptr = (struct tzdbg_int_t *)ptr;
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr->int_num,
+				(uint32_t)tzdbg_ptr->int_info,
+				(uint8_t *)tzdbg_ptr->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				(uint32_t)j,
+				(uint32_t)tzdbg_ptr->int_count[j]);
+			}
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			ptr += int_info_size;
+		}
+	} else {
+		for (i = 0; i < (*num_int); i++) {
+			tzdbg_ptr_tz40 = (struct tzdbg_int_t_tz40 *)ptr;
+			len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     Interrupt Number          : 0x%x\n"
+				"     Type of Interrupt         : 0x%x\n"
+				"     Description of interrupt  : %s\n",
+				tzdbg_ptr_tz40->int_num,
+				(uint32_t)tzdbg_ptr_tz40->int_info,
+				(uint8_t *)tzdbg_ptr_tz40->int_desc);
+			for (j = 0; j < tzdbg.diag_buf->cpu_count; j++) {
+				len += snprintf(tzdbg.disp_buf + len,
+				(debug_rw_buf_size - 1) - len,
+				"     int_count on CPU # %d      : %u\n",
+				(uint32_t)j,
+				(uint32_t)tzdbg_ptr_tz40->int_count[j]);
+			}
+			len += snprintf(tzdbg.disp_buf + len,
+					(debug_rw_buf_size - 1) - len, "\n");
+
+			if (len > (debug_rw_buf_size - 1)) {
+				pr_warn("%s: Cannot fit all info into buf\n",
+								__func__);
+				break;
+			}
+			ptr += int_info_size;
+		}
+	}
+
+	tzdbg.stat[TZDBG_INTERRUPT].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats_legacy(void)
+{
+	int len = 0;
+	unsigned char *ptr;
+
+	ptr = (unsigned char *)tzdbg.diag_buf +
+					tzdbg.diag_buf->ring_off;
+	len += snprintf(tzdbg.disp_buf, (debug_rw_buf_size - 1) - len,
+							"%s\n", ptr);
+
+	tzdbg.stat[TZDBG_LOG].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_log_stats(struct tzdbg_log_t *log,
+			struct tzdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = log->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start) {
+		wrap_cnt = wrap_end - wrap_start;
+	} else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/*
+		 * end position has wrapped around more than once;
+		 * the current start is no longer valid
+		 */
+		log_start->wrap = log->log_pos.wrap - 1;
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(log->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (log->log_pos.offset + 1) % log_len;
+	}
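+	/*
+	 * Illustrative example: with log_len = 100, a reader that stopped at
+	 * {wrap = 3, offset = 40} while the writer is now at {wrap = 4,
+	 * offset = 60} has been lapped (wrap_cnt == 1 and the writer's
+	 * offset has passed the reader's), so the reader restarts at offset
+	 * 61, the oldest byte not yet overwritten.
+	 */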
+
+	while (log_start->offset == log->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		unsigned long t = msleep_interruptible(50);
+
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		if (buf_idx == TZDBG_LOG)
+			memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	}
+
+	max_len = (count > debug_rw_buf_size) ? debug_rw_buf_size : count;
+
+	/*
+	 * Read from the ring buffer while there is data and space in the
+	 * return buffer.
+	 */
+	while ((log_start->offset != log->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log->log_buf[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int __disp_hyp_log_stats(uint8_t *log,
+			struct hypdbg_log_pos_t *log_start, uint32_t log_len,
+			size_t count, uint32_t buf_idx)
+{
+	struct hypdbg_t *hyp = tzdbg.hyp_diag_buf;
+	unsigned long t = 0;
+	uint32_t wrap_start;
+	uint32_t wrap_end;
+	uint32_t wrap_cnt;
+	int max_len;
+	int len = 0;
+	int i = 0;
+
+	wrap_start = log_start->wrap;
+	wrap_end = hyp->log_pos.wrap;
+
+	/* Calculate difference in # of buffer wrap-arounds */
+	if (wrap_end >= wrap_start) {
+		wrap_cnt = wrap_end - wrap_start;
+	} else {
+		/* wrap counter has wrapped around, invalidate start position */
+		wrap_cnt = 2;
+	}
+
+	if (wrap_cnt > 1) {
+		/*
+		 * end position has wrapped around more than once;
+		 * the current start is no longer valid
+		 */
+		log_start->wrap = hyp->log_pos.wrap - 1;
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	} else if ((wrap_cnt == 1) &&
+		(hyp->log_pos.offset > log_start->offset)) {
+		/* end position has overwritten start */
+		log_start->offset = (hyp->log_pos.offset + 1) % log_len;
+	}
+
+	while (log_start->offset == hyp->log_pos.offset) {
+		/*
+		 * No data in ring buffer,
+		 * so we'll hang around until something happens
+		 */
+		t = msleep_interruptible(50);
+		if (t != 0) {
+			/* Some event woke us up, so let's quit */
+			return 0;
+		}
+
+		/* TZDBG_HYP_LOG */
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+						tzdbg.hyp_debug_rw_buf_size);
+	}
+
+	max_len = (count > tzdbg.hyp_debug_rw_buf_size) ?
+				tzdbg.hyp_debug_rw_buf_size : count;
+
+	/*
+	 * Read from the ring buffer while there is data and space in the
+	 * return buffer.
+	 */
+	while ((log_start->offset != hyp->log_pos.offset) && (len < max_len)) {
+		tzdbg.disp_buf[i++] = log[log_start->offset];
+		log_start->offset = (log_start->offset + 1) % log_len;
+		if (log_start->offset == 0)
+			++log_start->wrap;
+		++len;
+	}
+
+	/*
+	 * return buffer to caller
+	 */
+	tzdbg.stat[buf_idx].data = tzdbg.disp_buf;
+	return len;
+}
+
+static int _disp_tz_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_t log_start = {0};
+	struct tzdbg_log_t *log_ptr;
+
+	log_ptr = (struct tzdbg_log_t *)((unsigned char *)tzdbg.diag_buf +
+				tzdbg.diag_buf->ring_off -
+				offsetof(struct tzdbg_log_t, log_buf));
+
+	return _disp_log_stats(log_ptr, &log_start,
+				tzdbg.diag_buf->ring_len, count, TZDBG_LOG);
+}
+
+static int _disp_hyp_log_stats(size_t count)
+{
+	static struct hypdbg_log_pos_t log_start = {0};
+	uint8_t *log_ptr;
+
+	log_ptr = (uint8_t *)((unsigned char *)tzdbg.hyp_diag_buf +
+				tzdbg.hyp_diag_buf->ring_off);
+
+	return __disp_hyp_log_stats(log_ptr, &log_start,
+			tzdbg.hyp_debug_rw_buf_size, count, TZDBG_HYP_LOG);
+}
+
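+/*
+ * The QSEE log buffer begins with a tzdbg_log_pos_t header; only the
+ * remainder of the 32K buffer is ring payload, hence the reduced length
+ * passed below.
+ */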
+static int _disp_qsee_log_stats(size_t count)
+{
+	static struct tzdbg_log_pos_t log_start = {0};
+
+	return _disp_log_stats(g_qsee_log, &log_start,
+			QSEE_LOG_BUF_SIZE - sizeof(struct tzdbg_log_pos_t),
+			count, TZDBG_QSEE_LOG);
+}
+
+static int _disp_hyp_general_stats(size_t count)
+{
+	int len = 0;
+	int i;
+	struct hypdbg_boot_info_t *ptr = NULL;
+
+	len += snprintf(tzdbg.disp_buf + len,
+			tzdbg.hyp_debug_rw_buf_size - 1,
+			"   Magic Number    : 0x%x\n"
+			"   CPU Count       : 0x%x\n"
+			"   S2 Fault Counter: 0x%x\n",
+			tzdbg.hyp_diag_buf->magic_num,
+			tzdbg.hyp_diag_buf->cpu_count,
+			tzdbg.hyp_diag_buf->s2_fault_counter);
+
+	ptr = tzdbg.hyp_diag_buf->boot_info;
+	for (i = 0; i < tzdbg.hyp_diag_buf->cpu_count; i++) {
+		len += snprintf(tzdbg.disp_buf + len,
+				(tzdbg.hyp_debug_rw_buf_size - 1) - len,
+				"  CPU #: %d\n"
+				"     Warmboot entry CPU counter: 0x%x\n"
+				"     Warmboot exit CPU counter : 0x%x\n",
+				i, ptr->warm_entry_cnt, ptr->warm_exit_cnt);
+
+		if (len > (tzdbg.hyp_debug_rw_buf_size - 1)) {
+			pr_warn("%s: Cannot fit all info into the buffer\n",
+								__func__);
+			break;
+		}
+		ptr++;
+	}
+
+	tzdbg.stat[TZDBG_HYP_GENERAL].data = (char *)tzdbg.disp_buf;
+	return len;
+}
+
+static ssize_t tzdbgfs_read(struct file *file, char __user *buf,
+	size_t count, loff_t *offp)
+{
+	int len = 0;
+	int *tz_id = file->private_data;
+
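+	/* snapshot the relevant shared diag region before formatting it */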
+	if (*tz_id == TZDBG_BOOT || *tz_id == TZDBG_RESET ||
+		*tz_id == TZDBG_INTERRUPT || *tz_id == TZDBG_GENERAL ||
+		*tz_id == TZDBG_VMID || *tz_id == TZDBG_LOG)
+		memcpy_fromio((void *)tzdbg.diag_buf, tzdbg.virt_iobase,
+						debug_rw_buf_size);
+
+	if (*tz_id == TZDBG_HYP_GENERAL || *tz_id == TZDBG_HYP_LOG)
+		memcpy_fromio((void *)tzdbg.hyp_diag_buf, tzdbg.hyp_virt_iobase,
+					tzdbg.hyp_debug_rw_buf_size);
+
+	switch (*tz_id) {
+	case TZDBG_BOOT:
+		len = _disp_tz_boot_stats();
+		break;
+	case TZDBG_RESET:
+		len = _disp_tz_reset_stats();
+		break;
+	case TZDBG_INTERRUPT:
+		len = _disp_tz_interrupt_stats();
+		break;
+	case TZDBG_GENERAL:
+		len = _disp_tz_general_stats();
+		break;
+	case TZDBG_VMID:
+		len = _disp_tz_vmid_stats();
+		break;
+	case TZDBG_LOG:
+		if (TZBSP_DIAG_MAJOR_VERSION_LEGACY <
+				(tzdbg.diag_buf->version >> 16)) {
+			len = _disp_tz_log_stats(count);
+			*offp = 0;
+		} else {
+			len = _disp_tz_log_stats_legacy();
+		}
+		break;
+	case TZDBG_QSEE_LOG:
+		len = _disp_qsee_log_stats(count);
+		*offp = 0;
+		break;
+	case TZDBG_HYP_GENERAL:
+		len = _disp_hyp_general_stats(count);
+		break;
+	case TZDBG_HYP_LOG:
+		len = _disp_hyp_log_stats(count);
+		*offp = 0;
+		break;
+	default:
+		break;
+	}
+
+	if (len > count)
+		len = count;
+
+	return simple_read_from_buffer(buf, len, offp,
+				tzdbg.stat[(*tz_id)].data, len);
+}
+
+static int tzdbgfs_open(struct inode *inode, struct file *pfile)
+{
+	pfile->private_data = inode->i_private;
+	return 0;
+}
+
+const struct file_operations tzdbg_fops = {
+	.owner   = THIS_MODULE,
+	.read    = tzdbgfs_read,
+	.open    = tzdbgfs_open,
+};
+
+static struct ion_client  *g_ion_clnt;
+static struct ion_handle *g_ihandle;
+
+/*
+ * Allocate the log buffer from ION and register it with TZ
+ */
+static void tzdbg_register_qsee_log_buf(void)
+{
+	/* register log buffer scm request */
+	struct qseecom_reg_log_buf_ireq req;
+
+	/* scm response */
+	struct qseecom_command_scm_resp resp = {};
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	int ret = 0;
+
+	/* Create ION msm client */
+	g_ion_clnt = msm_ion_client_create("qsee_log");
+	if (g_ion_clnt == NULL) {
+		pr_err("%s: Ion client cannot be created\n", __func__);
+		return;
+	}
+
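+	/* page-aligned (4K) allocation from the QSEECOM ION heap */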
+	g_ihandle = ion_alloc(g_ion_clnt, QSEE_LOG_BUF_SIZE,
+			4096, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(g_ihandle)) {
+		pr_err("%s: Ion client could not retrieve the handle\n",
+			__func__);
+		goto err1;
+	}
+
+	ret = ion_phys(g_ion_clnt, g_ihandle, &pa, &len);
+	if (ret) {
+		pr_err("%s: Ion conversion to physical address failed\n",
+			__func__);
+		goto err2;
+	}
+
+	req.qsee_cmd_id = QSEOS_REGISTER_LOG_BUF_COMMAND;
+	req.phy_addr = (uint32_t)pa;
+	req.len = len;
+
+	if (!is_scm_armv8()) {
+		/* SCM call to register the log buffer */
+		ret = scm_call(SCM_SVC_TZSCHEDULER, 1,  &req, sizeof(req),
+			&resp, sizeof(resp));
+	} else {
+		struct scm_desc desc = {0};
+
+		desc.args[0] = pa;
+		desc.args[1] = len;
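+		/*
+		 * arginfo 0x22 is SCM_ARGS(2, SCM_RW, SCM_VAL): a read/write
+		 * buffer address plus a length value.
+		 */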
+		desc.arginfo = 0x22;
+		ret = scm_call2(SCM_QSEEOS_FNID(1, 6), &desc);
+		resp.result = desc.ret[0];
+	}
+
+	if (ret) {
+		pr_err("%s: scm_call to register log buffer failed\n",
+			__func__);
+		goto err2;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err(
+		"%s: scm_call to register log buf failed, resp result =%d\n",
+		__func__, resp.result);
+		goto err2;
+	}
+
+	g_qsee_log =
+		(struct tzdbg_log_t *)ion_map_kernel(g_ion_clnt, g_ihandle);
+
+	if (IS_ERR(g_qsee_log)) {
+		pr_err("%s: Couldn't map ion buffer to kernel\n",
+			__func__);
+		goto err2;
+	}
+
+	g_qsee_log->log_pos.wrap = g_qsee_log->log_pos.offset = 0;
+	return;
+
+err2:
+	ion_free(g_ion_clnt, g_ihandle);
+	g_ihandle = NULL;
+err1:
+	ion_client_destroy(g_ion_clnt);
+	g_ion_clnt = NULL;
+}
+
+static int tzdbgfs_init(struct platform_device *pdev)
+{
+	int rc = 0;
+	int i;
+	struct dentry           *dent_dir;
+	struct dentry           *dent;
+
+	dent_dir = debugfs_create_dir("tzdbg", NULL);
+	if (dent_dir == NULL) {
+		dev_err(&pdev->dev, "tzdbg debugfs_create_dir failed\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < TZDBG_STATS_MAX; i++) {
+		tzdbg.debug_tz[i] = i;
+		dent = debugfs_create_file(tzdbg.stat[i].name,
+				0444, dent_dir,
+				&tzdbg.debug_tz[i], &tzdbg_fops);
+		if (dent == NULL) {
+			dev_err(&pdev->dev, "TZ debugfs_create_file failed\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	tzdbg.disp_buf = kzalloc(max(debug_rw_buf_size,
+			tzdbg.hyp_debug_rw_buf_size), GFP_KERNEL);
+	if (tzdbg.disp_buf == NULL) {
+		rc = -ENOMEM;
+		goto err;
+	}
+	platform_set_drvdata(pdev, dent_dir);
+	return 0;
+err:
+	debugfs_remove_recursive(dent_dir);
+
+	return rc;
+}
+
+static void tzdbgfs_exit(struct platform_device *pdev)
+{
+	struct dentry           *dent_dir;
+
+	kzfree(tzdbg.disp_buf);
+	dent_dir = platform_get_drvdata(pdev);
+	debugfs_remove_recursive(dent_dir);
+	if (g_ion_clnt != NULL) {
+		if (!IS_ERR_OR_NULL(g_ihandle)) {
+			ion_unmap_kernel(g_ion_clnt, g_ihandle);
+			ion_free(g_ion_clnt, g_ihandle);
+		}
+		ion_client_destroy(g_ion_clnt);
+	}
+}
+
+static int __update_hypdbg_base(struct platform_device *pdev,
+			void __iomem *virt_iobase)
+{
+	phys_addr_t hypdiag_phy_iobase;
+	uint32_t hyp_address_offset;
+	uint32_t hyp_size_offset;
+	struct hypdbg_t *hyp;
+	uint32_t *ptr = NULL;
+
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-address-offset",
+							&hyp_address_offset)) {
+		dev_err(&pdev->dev, "hyplog address offset is not defined\n");
+		return -EINVAL;
+	}
+	if (of_property_read_u32((&pdev->dev)->of_node, "hyplog-size-offset",
+							&hyp_size_offset)) {
+		dev_err(&pdev->dev, "hyplog size offset is not defined\n");
+		return -EINVAL;
+	}
+
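+	/* the TZ diag region publishes the hyp log base and size here */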
+	hypdiag_phy_iobase = readl_relaxed(virt_iobase + hyp_address_offset);
+	tzdbg.hyp_debug_rw_buf_size = readl_relaxed(virt_iobase +
+					hyp_size_offset);
+
+	tzdbg.hyp_virt_iobase = devm_ioremap_nocache(&pdev->dev,
+					hypdiag_phy_iobase,
+					tzdbg.hyp_debug_rw_buf_size);
+	if (!tzdbg.hyp_virt_iobase) {
+		dev_err(&pdev->dev, "ERROR could not ioremap: start=%pa, len=%u\n",
+			&hypdiag_phy_iobase, tzdbg.hyp_debug_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(tzdbg.hyp_debug_rw_buf_size, GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	tzdbg.hyp_diag_buf = (struct hypdbg_t *)ptr;
+	hyp = tzdbg.hyp_diag_buf;
+	hyp->log_pos.wrap = hyp->log_pos.offset = 0;
+	return 0;
+}
+
+static void tzdbg_get_tz_version(void)
+{
+	uint32_t smc_id = 0;
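+	/* feature ID 10 identifies the TZ software version entry */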
+	uint32_t feature = 10;
+	struct qseecom_command_scm_resp resp = {0};
+	struct scm_desc desc = {0};
+	int ret = 0;
+
+	if (!is_scm_armv8()) {
+		ret = scm_call(SCM_SVC_INFO, SCM_SVC_UTIL,  &feature,
+					sizeof(feature), &resp, sizeof(resp));
+	} else {
+		smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+		desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+		desc.args[0] = feature;
+		ret = scm_call2(smc_id, &desc);
+		resp.result = desc.ret[0];
+	}
+
+	if (ret)
+		pr_err("%s: scm_call to get tz version failed\n",
+				__func__);
+	else
+		tzdbg.tz_version = resp.result;
+}
+
+/*
+ * Driver functions
+ */
+static int tz_log_probe(struct platform_device *pdev)
+{
+	struct resource *resource;
+	void __iomem *virt_iobase;
+	phys_addr_t tzdiag_phy_iobase;
+	uint32_t *ptr = NULL;
+	int ret = 0;
+
+	/*
+	 * Get the address that stores the physical location of the
+	 * diagnostic data
+	 */
+	resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!resource) {
+		dev_err(&pdev->dev,
+				"%s: ERROR Missing MEM resource\n", __func__);
+		return -ENXIO;
+	}
+
+	/*
+	 * Get the debug buffer size
+	 */
+	debug_rw_buf_size = resource_size(resource);
+
+	/*
+	 * Map the address that stores the physical location of the
+	 * diagnostic data
+	 */
+	virt_iobase = devm_ioremap_nocache(&pdev->dev, resource->start,
+				debug_rw_buf_size);
+	if (!virt_iobase) {
+		dev_err(&pdev->dev,
+			"%s: ERROR could not ioremap: start=%pa, len=%u\n",
+			__func__, &resource->start,
+			(unsigned int)(debug_rw_buf_size));
+		return -ENXIO;
+	}
+
+	if (pdev->dev.of_node) {
+		tzdbg.is_hyplog_enabled = of_property_read_bool(
+			(&pdev->dev)->of_node, "qcom,hyplog-enabled");
+		if (tzdbg.is_hyplog_enabled) {
+			ret = __update_hypdbg_base(pdev, virt_iobase);
+			if (ret) {
+				dev_err(&pdev->dev, "%s() failed to get device tree data ret = %d\n",
+						__func__, ret);
+				return -EINVAL;
+			}
+		} else {
+			dev_info(&pdev->dev, "Hyp log service is not supported\n");
+		}
+	} else {
+		dev_dbg(&pdev->dev, "Device tree data not found\n");
+	}
+
+	/*
+	 * Retrieve the address of diagnostic data
+	 */
+	tzdiag_phy_iobase = readl_relaxed(virt_iobase);
+
+	/*
+	 * Map the diagnostic information area
+	 */
+	tzdbg.virt_iobase = devm_ioremap_nocache(&pdev->dev,
+				tzdiag_phy_iobase, debug_rw_buf_size);
+
+	if (!tzdbg.virt_iobase) {
+		dev_err(&pdev->dev,
+			"%s: ERROR could not ioremap: start=%pa, len=%u\n",
+			__func__, &tzdiag_phy_iobase,
+			debug_rw_buf_size);
+		return -ENXIO;
+	}
+
+	ptr = kzalloc(debug_rw_buf_size, GFP_KERNEL);
+	if (ptr == NULL)
+		return -ENOMEM;
+
+	tzdbg.diag_buf = (struct tzdbg_t *)ptr;
+
+	if (tzdbgfs_init(pdev))
+		goto err;
+
+	tzdbg_register_qsee_log_buf();
+
+	tzdbg_get_tz_version();
+
+	return 0;
+err:
+	kfree(tzdbg.diag_buf);
+	return -ENXIO;
+}
+
+static int tz_log_remove(struct platform_device *pdev)
+{
+	kzfree(tzdbg.diag_buf);
+	kzfree(tzdbg.hyp_diag_buf);
+	tzdbgfs_exit(pdev);
+
+	return 0;
+}
+
+static const struct of_device_id tzlog_match[] = {
+	{	.compatible = "qcom,tz-log",
+	},
+	{}
+};
+
+static struct platform_driver tz_log_driver = {
+	.probe		= tz_log_probe,
+	.remove		= tz_log_remove,
+	.driver		= {
+		.name = "tz_log",
+		.owner = THIS_MODULE,
+		.of_match_table = tzlog_match,
+		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
+	},
+};
+
+static int __init tz_log_init(void)
+{
+	return platform_driver_register(&tz_log_driver);
+}
+
+static void __exit tz_log_exit(void)
+{
+	platform_driver_unregister(&tz_log_driver);
+}
+
+module_init(tz_log_init);
+module_exit(tz_log_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TZ Log driver");
+MODULE_ALIAS("platform:tz_log");
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 9d2e95b..ced015f 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -305,6 +305,7 @@
 	}
 
 	sde_dbg_destroy();
+	debugfs_remove_recursive(priv->debug_root);
 
 	component_unbind_all(dev, ddev);
 	sde_power_client_destroy(&priv->phandle, priv->pclient);
@@ -611,7 +612,16 @@
 	if (ret)
 		goto fail;
 
-	ret = sde_dbg_debugfs_register(ddev->primary->debugfs_root);
+	priv->debug_root = debugfs_create_dir("debug",
+					ddev->primary->debugfs_root);
+	if (IS_ERR_OR_NULL(priv->debug_root)) {
+		pr_err("debugfs_root create_dir fail, error %ld\n",
+		       PTR_ERR(priv->debug_root));
+		priv->debug_root = NULL;
+		goto fail;
+	}
+
+	ret = sde_dbg_debugfs_register(priv->debug_root);
 	if (ret) {
 		dev_err(dev, "failed to reg sde dbg debugfs: %d\n", ret);
 		goto fail;
@@ -1696,6 +1706,13 @@
 	{ },
 };
 
+#ifdef CONFIG_QCOM_KGSL
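+/* KGSL owns the GPU on these targets; don't add a DRM GPU component */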
+static int add_gpu_components(struct device *dev,
+			      struct component_match **matchptr)
+{
+	return 0;
+}
+#else
 static int add_gpu_components(struct device *dev,
 			      struct component_match **matchptr)
 {
@@ -1711,6 +1728,7 @@
 
 	return 0;
 }
+#endif
 
 static int msm_drm_bind(struct device *dev)
 {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index f2fccd7..da76fbc 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -492,6 +492,9 @@
 
 	/* whether registered and drm_dev_unregister should be called */
 	bool registered;
+
+	/* msm drv debug root node */
+	struct dentry *debug_root;
 };
 
 struct msm_format {
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index cec8792..54acf41a 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -592,8 +592,9 @@
 
 	lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
 						bg_alpha, blend_op);
-	SDE_DEBUG("format 0x%x, alpha_enable %u fg alpha:0x%x bg alpha:0x%x \"\
-		 blend_op:0x%x\n", format->base.pixel_format,
+	SDE_DEBUG(
+		"format: %4.4s, alpha_enable %u fg alpha:0x%x bg alpha:0x%x blend_op:0x%x\n",
+		(char *) &format->base.pixel_format,
 		format->alpha_enable, fg_alpha, bg_alpha, blend_op);
 }
 
@@ -663,6 +664,8 @@
 	struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
 {
 	struct drm_plane *plane;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *state;
 	struct sde_crtc_state *cstate;
 	struct sde_plane_state *pstate = NULL;
 	struct sde_format *format;
@@ -691,8 +694,12 @@
 	crtc_split_width = get_crtc_split_width(crtc);
 
 	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		state = plane->state;
+		if (!state)
+			continue;
 
-		pstate = to_sde_plane_state(plane->state);
+		pstate = to_sde_plane_state(state);
+		fb = state->fb;
 
 		if (sde_plane_is_sbuf_mode(plane, &prefill))
 			sbuf_mode = true;
@@ -700,7 +707,7 @@
 		sde_plane_get_ctl_flush(plane, ctl, &flush_mask);
 
 		/* always stage plane on either left or right lm */
-		if (plane->state->crtc_x >= crtc_split_width) {
+		if (state->crtc_x >= crtc_split_width) {
 			lm_idx = RIGHT_MIXER;
 			idx = right_crtc_zpos_cnt[pstate->stage]++;
 		} else {
@@ -710,8 +717,7 @@
 
 		/* stage plane on right LM if it crosses the boundary */
 		lm_right = (lm_idx == LEFT_MIXER) &&
-		   (plane->state->crtc_x + plane->state->crtc_w >
-							crtc_split_width);
+		   (state->crtc_x + state->crtc_w > crtc_split_width);
 
 		stage_cfg->stage[lm_idx][pstate->stage][idx] =
 							sde_plane_pipe(plane);
@@ -725,11 +731,18 @@
 				pstate->stage,
 				plane->base.id,
 				sde_plane_pipe(plane) - SSPP_VIG0,
-				plane->state->fb ?
-				plane->state->fb->base.id : -1);
+				state->fb ? state->fb->base.id : -1);
 
 		format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
 
+		SDE_EVT32(DRMID(plane), state->src_x, state->src_y,
+			state->src_w >> 16, state->src_h >> 16, state->crtc_x,
+			state->crtc_y, state->crtc_w, state->crtc_h);
+		SDE_EVT32(DRMID(plane), DRMID(crtc), lm_idx, lm_right,
+			pstate->stage, pstate->multirect_index,
+			pstate->multirect_mode, format->base.pixel_format,
+			fb ? fb->modifier[0] : 0);
+
 		/* blend config update */
 		if (pstate->stage != SDE_STAGE_BASE) {
 			_sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
@@ -872,7 +885,7 @@
 
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	SDE_EVT32(DRMID(crtc));
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 
 	/* identify connectors attached to this crtc */
 	cstate->num_connectors = 0;
@@ -954,7 +967,7 @@
 	_sde_crtc_complete_flip(crtc, NULL);
 	drm_crtc_handle_vblank(crtc);
 	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
-	SDE_EVT32_IRQ(DRMID(crtc));
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 }
 
 static void sde_crtc_frame_event_work(struct kthread_work *work)
@@ -1000,7 +1013,8 @@
 					crtc->base.id,
 					ktime_to_ns(fevent->ts),
 					atomic_read(&sde_crtc->frame_pending));
-			SDE_EVT32(DRMID(crtc), fevent->event, 0);
+			SDE_EVT32(DRMID(crtc), fevent->event,
+							SDE_EVTLOG_FUNC_CASE1);
 
 			/* don't propagate unexpected frame done events */
 			return;
@@ -1009,16 +1023,18 @@
 			SDE_DEBUG("crtc%d ts:%lld last pending\n",
 					crtc->base.id,
 					ktime_to_ns(fevent->ts));
-			SDE_EVT32(DRMID(crtc), fevent->event, 1);
+			SDE_EVT32(DRMID(crtc), fevent->event,
+							SDE_EVTLOG_FUNC_CASE2);
 			sde_core_perf_crtc_release_bw(crtc);
 		} else {
-			SDE_EVT32(DRMID(crtc), fevent->event, 2);
+			SDE_EVT32_VERBOSE(DRMID(crtc), fevent->event,
+							SDE_EVTLOG_FUNC_CASE3);
 		}
 	} else {
 		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
 				ktime_to_ns(fevent->ts),
 				fevent->event);
-		SDE_EVT32(DRMID(crtc), fevent->event, 3);
+		SDE_EVT32(DRMID(crtc), fevent->event, SDE_EVTLOG_FUNC_CASE4);
 	}
 
 	if (fevent->event & SDE_ENCODER_FRAME_EVENT_PANEL_DEAD)
@@ -1048,8 +1064,7 @@
 	pipe_id = drm_crtc_index(crtc);
 
 	SDE_DEBUG("crtc%d\n", crtc->base.id);
-
-	SDE_EVT32(DRMID(crtc), event);
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 
 	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
 	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
@@ -1085,7 +1100,7 @@
 
 	sde_crtc = to_sde_crtc(crtc);
 	cstate = to_sde_crtc_state(crtc->state);
-	SDE_EVT32(DRMID(crtc));
+	SDE_EVT32_VERBOSE(DRMID(crtc));
 
 	/* signal output fence(s) at end of commit */
 	sde_fence_signal(&sde_crtc->output_fence, 0);
@@ -1709,7 +1724,8 @@
 	if (atomic_read(&sde_crtc->vblank_refcount) && !sde_crtc->suspend) {
 		SDE_ERROR("crtc%d invalid vblank refcount\n",
 				crtc->base.id);
-		SDE_EVT32(DRMID(crtc));
+		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->vblank_refcount),
+							SDE_EVTLOG_FUNC_CASE1);
 		drm_for_each_encoder(encoder, crtc->dev) {
 			if (encoder->crtc != crtc)
 				continue;
@@ -1723,7 +1739,8 @@
 		/* release bandwidth and other resources */
 		SDE_ERROR("crtc%d invalid frame pending\n",
 				crtc->base.id);
-		SDE_EVT32(DRMID(crtc));
+		SDE_EVT32(DRMID(crtc), atomic_read(&sde_crtc->frame_pending),
+							SDE_EVTLOG_FUNC_CASE2);
 		sde_core_perf_crtc_release_bw(crtc);
 		atomic_set(&sde_crtc->frame_pending, 0);
 	}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 7137aaa..20d5e52 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -1013,7 +1013,7 @@
 		if (sde_enc->phys_encs[i] == ready_phys) {
 			clear_bit(i, sde_enc->frame_busy_mask);
 			sde_enc->crtc_frame_event |= event;
-			SDE_EVT32(DRMID(drm_enc), i,
+			SDE_EVT32_VERBOSE(DRMID(drm_enc), i,
 					sde_enc->frame_busy_mask[0]);
 		}
 
@@ -1053,14 +1053,18 @@
 	}
 
 	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
-	SDE_EVT32(DRMID(&to_sde_encoder_virt(drm_enc)->base),
-			phys->intf_idx, pending_kickoff_cnt);
 
 	if (extra_flush_bits && ctl->ops.update_pending_flush)
 		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
 
 	ctl->ops.trigger_flush(ctl);
-	SDE_EVT32(DRMID(drm_enc), ctl->idx);
+
+	if (ctl->ops.get_pending_flush)
+		SDE_EVT32(DRMID(drm_enc), phys->intf_idx, pending_kickoff_cnt,
+			ctl->idx, ctl->ops.get_pending_flush(ctl));
+	else
+		SDE_EVT32(DRMID(drm_enc), phys->intf_idx, ctl->idx,
+						pending_kickoff_cnt);
 }
 
 /**
@@ -1081,7 +1085,6 @@
 void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
 {
 	struct sde_hw_ctl *ctl;
-	int ctl_idx = -1;
 
 	if (!phys_enc) {
 		SDE_ERROR("invalid encoder\n");
@@ -1091,11 +1094,8 @@
 	ctl = phys_enc->hw_ctl;
 	if (ctl && ctl->ops.trigger_start) {
 		ctl->ops.trigger_start(ctl);
-		ctl_idx = ctl->idx;
+		SDE_EVT32(DRMID(phys_enc->parent), ctl->idx);
 	}
-
-	if (phys_enc && phys_enc->parent)
-		SDE_EVT32(DRMID(phys_enc->parent), ctl_idx);
 }
 
 int sde_encoder_helper_wait_event_timeout(
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index 86e292f..a68da4e 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -167,6 +167,10 @@
 		do_log = true;
 	}
 
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->pp_timeout_report_cnt,
+			atomic_read(&phys_enc->pending_kickoff_cnt));
+
 	/* to avoid flooding, only log first time, and "dead" time */
 	if (do_log) {
 		SDE_ERROR_CMDENC(cmd_enc,
@@ -176,10 +180,7 @@
 				cmd_enc->pp_timeout_report_cnt,
 				atomic_read(&phys_enc->pending_kickoff_cnt));
 
-		SDE_EVT32(DRMID(phys_enc->parent),
-				phys_enc->hw_pp->idx - PINGPONG_0,
-				0xbad, cmd_enc->pp_timeout_report_cnt,
-				atomic_read(&phys_enc->pending_kickoff_cnt));
+		SDE_EVT32(DRMID(phys_enc->parent), SDE_EVTLOG_FATAL);
 
 		SDE_DBG_DUMP("sde", "dsi0_ctrl", "dsi0_phy", "dsi1_ctrl",
 				"dsi1_phy", "vbif", "dbg_bus",
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 01d0d20..e7f3df7 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -636,8 +636,8 @@
 
 	color = _sde_format_get_media_color_ubwc(fmt);
 	if (color < 0) {
-		DRM_ERROR("UBWC format not supported for fmt:0x%X\n",
-			fmt->base.pixel_format);
+		DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+			(char *)&fmt->base.pixel_format);
 		return -EINVAL;
 	}
 
@@ -1123,21 +1123,23 @@
 	case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
 		map = sde_format_map_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_ubwc);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
-				format);
+		SDE_DEBUG("found fmt: %4.4s  DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+				(char *)&format);
 		break;
 	case DRM_FORMAT_MOD_QCOM_DX:
 		map = sde_format_map_p010;
 		map_size = ARRAY_SIZE(sde_format_map_p010);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX\n", format);
+		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_DX\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
 			DRM_FORMAT_MOD_QCOM_TILE):
 		map = sde_format_map_p010_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_p010_ubwc);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
-				format);
+		SDE_DEBUG(
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
 		DRM_FORMAT_MOD_QCOM_TIGHT):
@@ -1146,26 +1148,28 @@
 		map = sde_format_map_tp10_ubwc;
 		map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc);
 		SDE_DEBUG(
-			"found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
-			format);
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
+				(char *)&format);
 		break;
 	case DRM_FORMAT_MOD_QCOM_TILE:
 		map = sde_format_map_tile;
 		map_size = ARRAY_SIZE(sde_format_map_tile);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE\n", format);
+		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
 		map = sde_format_map_p010_tile;
 		map_size = ARRAY_SIZE(sde_format_map_p010_tile);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX\n",
-				format);
+		SDE_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX\n",
+				(char *)&format);
 		break;
 	case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
 			DRM_FORMAT_MOD_QCOM_TIGHT):
 		map = sde_format_map_tp10_tile;
 		map_size = ARRAY_SIZE(sde_format_map_tp10_tile);
-		SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
-				format);
+		SDE_DEBUG(
+			"found fmt: %4.4s DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
+				(char *)&format);
 		break;
 	default:
 		SDE_ERROR("unsupported format modifier %llX\n", mod0);
@@ -1180,11 +1184,11 @@
 	}
 
 	if (fmt == NULL)
-		SDE_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
-				format, mod0);
+		SDE_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+				(char *)&format, mod0);
 	else
-		SDE_DEBUG("fmt %s mod 0x%llX ubwc %d yuv %d\n",
-				drm_get_format_name(format), mod0,
+		SDE_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+				(char *)&format, mod0,
 				SDE_FORMAT_IS_UBWC(fmt),
 				SDE_FORMAT_IS_YUV(fmt));
 
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
index 18893af..ad2910e 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -56,6 +56,19 @@
  */
 static u32 offsite_v_coeff[] = {0x00060002};
 
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct sde_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
 static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
 		struct sde_mdss_cfg *m,
 		void __iomem *addr,
@@ -279,6 +292,11 @@
 
 	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
 			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+	/*
+	 * Perform any default initialization for the chroma down module,
+	 * i.e. program the default CSC coefficients
+	 */
+	sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
 
 	return c;
 }
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
index 47fb07f..d5289c0 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -844,11 +844,21 @@
 
 static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
 {
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off, 0xffffffff);
+
 	return 0;
 }
 
 static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
 {
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[i].en_off, 0x00000000);
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 8bc6a2b..7e18a0e 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -57,7 +57,7 @@
  * # echo 0x2 > /sys/module/drm/parameters/debug
  *
  * To enable DRM driver h/w logging
- * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
  *
  * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
  */
@@ -275,7 +275,13 @@
 
 void *sde_debugfs_get_root(struct sde_kms *sde_kms)
 {
-	return sde_kms ? sde_kms->dev->primary->debugfs_root : 0;
+	struct msm_drm_private *priv;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private)
+		return NULL;
+
+	priv = sde_kms->dev->dev_private;
+	return priv->debug_root;
 }
 
 static int _sde_debugfs_init(struct sde_kms *sde_kms)
@@ -405,11 +411,11 @@
 		if (encoder->crtc != crtc)
 			continue;
 		/*
-		 * Wait post-flush if necessary to delay before plane_cleanup
-		 * For example, wait for vsync in case of video mode panels
-		 * This should be a no-op for command mode panels
+		 * Wait for post-flush if necessary to delay before
+		 * plane_cleanup. For example, wait for vsync in case of video
+		 * mode panels. This may be a no-op for command mode panels.
 		 */
-		SDE_EVT32(DRMID(crtc));
+		SDE_EVT32_VERBOSE(DRMID(crtc));
 		ret = sde_encoder_wait_for_commit_done(encoder);
 		if (ret && ret != -EWOULDBLOCK) {
 			SDE_ERROR("wait for commit done returned %d\n", ret);
@@ -1226,7 +1232,7 @@
 /* the caller api needs to turn on clock before calling it */
 static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
 {
-	return;
+	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
 }
 
 static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 6e1fe33..e8892fb 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -253,9 +253,10 @@
 			((src_width + 32) * fmt->bpp);
 	}
 
-	SDE_DEBUG("plane%u: pnum:%d fmt:%x w:%u fl:%u\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
 			plane->base.id, psde->pipe - SSPP_VIG0,
-			fmt->base.pixel_format, src_width, total_fl);
+			(char *)&fmt->base.pixel_format,
+			src_width, total_fl);
 
 	return total_fl;
 }
@@ -365,10 +366,10 @@
 			psde->is_rt_pipe, total_fl, qos_lut,
 			(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
 
-	SDE_DEBUG("plane%u: pnum:%d fmt:%x rt:%d fl:%u lut:0x%x\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%x\n",
 			plane->base.id,
 			psde->pipe - SSPP_VIG0,
-			(fmt) ? fmt->base.pixel_format : 0,
+			fmt ? (char *)&fmt->base.pixel_format : NULL,
 			psde->is_rt_pipe, total_fl, qos_lut);
 
 	psde->pipe_hw->ops.setup_creq_lut(psde->pipe_hw, &psde->pipe_qos_cfg);
@@ -427,10 +428,10 @@
 			psde->pipe_qos_cfg.danger_lut,
 			psde->pipe_qos_cfg.safe_lut);
 
-	SDE_DEBUG("plane%u: pnum:%d fmt:%x mode:%d luts[0x%x, 0x%x]\n",
+	SDE_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
 		plane->base.id,
 		psde->pipe - SSPP_VIG0,
-		fmt ? fmt->base.pixel_format : 0,
+		fmt ? (char *)&fmt->base.pixel_format : NULL,
 		fmt ? fmt->fetch_mode : -1,
 		psde->pipe_qos_cfg.danger_lut,
 		psde->pipe_qos_cfg.safe_lut);
@@ -620,8 +621,6 @@
 			prefix = sde_sync_get_name_prefix(input_fence);
 			rc = sde_sync_wait(input_fence, wait_ms);
 
-			SDE_EVT32(DRMID(plane), -ret, prefix);
-
 			switch (rc) {
 			case 0:
 				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
@@ -648,6 +647,8 @@
 				ret = 0;
 				break;
 			}
+
+			SDE_EVT32_VERBOSE(DRMID(plane), -ret, prefix);
 		} else {
 			ret = 0;
 		}
@@ -1623,31 +1624,25 @@
 		rstate->out_rotation &= ~DRM_REFLECT_Y;
 
 	SDE_DEBUG(
-		"plane%d.%d rot:%d/%c%c%c%c/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+		"plane%d.%d rot:%d/%c%c%c%c/%dx%d/%4.4s/%llx/%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id, hw_cmd,
 			rot_cmd->rot90 ? 'r' : '_',
 			rot_cmd->hflip ? 'h' : '_',
 			rot_cmd->vflip ? 'v' : '_',
 			rot_cmd->video_mode ? 'V' : 'C',
 			state->fb->width, state->fb->height,
-			state->fb->pixel_format >> 0,
-			state->fb->pixel_format >> 8,
-			state->fb->pixel_format >> 16,
-			state->fb->pixel_format >> 24,
+			(char *) &state->fb->pixel_format,
 			state->fb->modifier[0],
 			drm_rect_width(&rstate->in_rot_rect) >> 16,
 			drm_rect_height(&rstate->in_rot_rect) >> 16,
 			rstate->in_rot_rect.x1 >> 16,
 			rstate->in_rot_rect.y1 >> 16);
 
-	SDE_DEBUG("plane%d.%d sspp:%d/%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d\n",
+	SDE_DEBUG("plane%d.%d sspp:%d/%x/%dx%d/%4.4s/%llx/%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id, hw_cmd,
 			rstate->out_rotation,
 			rstate->out_fb_width, rstate->out_fb_height,
-			rstate->out_fb_pixel_format >> 0,
-			rstate->out_fb_pixel_format >> 8,
-			rstate->out_fb_pixel_format >> 16,
-			rstate->out_fb_pixel_format >> 24,
+			(char *) &rstate->out_fb_pixel_format,
 			rstate->out_fb_modifier[0],
 			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
 			rstate->out_src_x >> 16, rstate->out_src_y >> 16);
@@ -2235,13 +2230,10 @@
 	nplanes = fmt->num_planes;
 
 	SDE_DEBUG(
-		"plane%d.%d sspp:%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
+		"plane%d.%d sspp:%dx%d/%4.4s/%llx/%dx%d+%d+%d/%x crtc:%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id,
 			rstate->out_fb_width, rstate->out_fb_height,
-			rstate->out_fb_pixel_format >> 0,
-			rstate->out_fb_pixel_format >> 8,
-			rstate->out_fb_pixel_format >> 16,
-			rstate->out_fb_pixel_format >> 24,
+			(char *) &rstate->out_fb_pixel_format,
 			rstate->out_fb_modifier[0],
 			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
 			rstate->out_src_x >> 16, rstate->out_src_y >> 16,
@@ -2312,13 +2304,10 @@
 			state->crtc_w, state->crtc_h, !q16_data);
 
 		SDE_DEBUG_PLANE(psde,
-			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %c%c%c%c ubwc %d\n",
+			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %4.4s ubwc %d\n",
 				fb->base.id, src.x, src.y, src.w, src.h,
 				crtc->base.id, dst.x, dst.y, dst.w, dst.h,
-				fmt->base.pixel_format >> 0,
-				fmt->base.pixel_format >> 8,
-				fmt->base.pixel_format >> 16,
-				fmt->base.pixel_format >> 24,
+				(char *)&fmt->base.pixel_format,
 				SDE_FORMAT_IS_UBWC(fmt));
 
 		if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
@@ -2771,14 +2760,11 @@
 		goto modeset_update;
 
 	SDE_DEBUG(
-		"plane%d.%u sspp:%x/%dx%d/%c%c%c%c/%llx/%dx%d+%d+%d crtc:%dx%d+%d+%d\n",
+		"plane%d.%u sspp:%x/%dx%d/%4.4s/%llx/%dx%d+%d+%d crtc:%dx%d+%d+%d\n",
 			plane->base.id, rstate->sequence_id,
 			rstate->out_rotation,
 			rstate->out_fb_width, rstate->out_fb_height,
-			rstate->out_fb_pixel_format >> 0,
-			rstate->out_fb_pixel_format >> 8,
-			rstate->out_fb_pixel_format >> 16,
-			rstate->out_fb_pixel_format >> 24,
+			(char *) &rstate->out_fb_pixel_format,
 			rstate->out_fb_modifier[0],
 			rstate->out_src_w >> 16, rstate->out_src_h >> 16,
 			rstate->out_src_x >> 16, rstate->out_src_y >> 16,
@@ -2870,11 +2856,11 @@
 		sde_kms_rect_intersect(&intersect, &src, &pstate->excl_rect);
 		if (!intersect.w || !intersect.h || SDE_FORMAT_IS_YUV(fmt)) {
 			SDE_ERROR_PLANE(psde,
-				"invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt:%s\n",
+				"invalid excl_rect:{%d,%d,%d,%d} src:{%d,%d,%d,%d}, fmt: %4.4s\n",
 				pstate->excl_rect.x, pstate->excl_rect.y,
 				pstate->excl_rect.w, pstate->excl_rect.h,
 				src.x, src.y, src.w, src.h,
-				drm_get_format_name(fmt->base.pixel_format));
+				(char *)&fmt->base.pixel_format);
 			ret = -EINVAL;
 		}
 	}
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 3bb79127..a4b918e 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -151,7 +151,6 @@
  * struct sde_dbg_base - global sde debug base structure
  * @evtlog: event log instance
  * @reg_base_list: list of register dumping regions
- * @root: base debugfs root
  * @dev: device pointer
  * @power_ctrl: callback structure for enabling power for reading hw registers
  * @req_dump_blks: list of blocks requested for dumping
@@ -165,7 +164,6 @@
 static struct sde_dbg_base {
 	struct sde_dbg_evtlog *evtlog;
 	struct list_head reg_base_list;
-	struct dentry *root;
 	struct device *dev;
 	struct sde_dbg_power_ctrl power_ctrl;
 
@@ -2903,24 +2901,19 @@
 	struct sde_dbg_reg_base *blk_base;
 	char debug_name[80] = "";
 
-	sde_dbg_base.root = debugfs_create_dir("evt_dbg", debugfs_root);
-	if (IS_ERR_OR_NULL(sde_dbg_base.root)) {
-		pr_err("debugfs_create_dir fail, error %ld\n",
-		       PTR_ERR(sde_dbg_base.root));
-		sde_dbg_base.root = NULL;
-		return -ENODEV;
-	}
+	if (!debugfs_root)
+		return -EINVAL;
 
-	debugfs_create_file("dump", 0644, sde_dbg_base.root, NULL,
+	debugfs_create_file("dump", 0644, debugfs_root, NULL,
 			&sde_evtlog_fops);
-	debugfs_create_u32("enable", 0644, sde_dbg_base.root,
+	debugfs_create_u32("enable", 0644, debugfs_root,
 			&(sde_dbg_base.evtlog->enable));
-	debugfs_create_file("filter", 0644, sde_dbg_base.root,
+	debugfs_create_file("filter", 0644, debugfs_root,
 			sde_dbg_base.evtlog,
 			&sde_evtlog_filter_fops);
-	debugfs_create_u32("panic", 0644, sde_dbg_base.root,
+	debugfs_create_u32("panic", 0644, debugfs_root,
 			&sde_dbg_base.panic_on_err);
-	debugfs_create_u32("reg_dump", 0644, sde_dbg_base.root,
+	debugfs_create_u32("reg_dump", 0644, debugfs_root,
 			&sde_dbg_base.enable_reg_dump);
 
 	if (dbg->dbgbus_sde.entries) {
@@ -2928,7 +2921,7 @@
 		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
 				dbg->dbgbus_sde.cmn.name);
 		dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
-		debugfs_create_u32(debug_name, 0644, dbg->root,
+		debugfs_create_u32(debug_name, 0644, debugfs_root,
 				&dbg->dbgbus_sde.cmn.enable_mask);
 	}
 
@@ -2937,36 +2930,28 @@
 		snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
 				dbg->dbgbus_vbif_rt.cmn.name);
 		dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
-		debugfs_create_u32(debug_name, 0644, dbg->root,
+		debugfs_create_u32(debug_name, 0644, debugfs_root,
 				&dbg->dbgbus_vbif_rt.cmn.enable_mask);
 	}
 
 	list_for_each_entry(blk_base, &dbg->reg_base_list, reg_base_head) {
 		snprintf(debug_name, sizeof(debug_name), "%s_off",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0644, dbg->root, blk_base,
+		debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
 				&sde_off_fops);
 
 		snprintf(debug_name, sizeof(debug_name), "%s_reg",
 				blk_base->name);
-		debugfs_create_file(debug_name, 0644, dbg->root, blk_base,
+		debugfs_create_file(debug_name, 0644, debugfs_root, blk_base,
 				&sde_reg_fops);
 	}
 
 	return 0;
 }
 
-#ifdef CONFIG_DEBUG_FS
-static void _sde_dbg_debugfs_destroy(void)
-{
-	debugfs_remove_recursive(sde_dbg_base.root);
-	sde_dbg_base.root = 0;
-}
-#else
 static void _sde_dbg_debugfs_destroy(void)
 {
 }
-#endif
 
 void sde_dbg_init_dbg_buses(u32 hwversion)
 {
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
index 8822df5..02d46c7 100644
--- a/drivers/gpu/drm/msm/sde_dbg.h
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -20,6 +20,13 @@
 #define SDE_EVTLOG_DATA_LIMITER	(-1)
 #define SDE_EVTLOG_FUNC_ENTRY	0x1111
 #define SDE_EVTLOG_FUNC_EXIT	0x2222
+#define SDE_EVTLOG_FUNC_CASE1	0x3333
+#define SDE_EVTLOG_FUNC_CASE2	0x4444
+#define SDE_EVTLOG_FUNC_CASE3	0x5555
+#define SDE_EVTLOG_FUNC_CASE4	0x6666
+#define SDE_EVTLOG_FUNC_CASE5	0x7777
+#define SDE_EVTLOG_PANIC	0xdead
+#define SDE_EVTLOG_FATAL	0xbad
 
 #define SDE_DBG_DUMP_DATA_LIMITER (NULL)
 
@@ -36,7 +43,7 @@
 };
 
 #ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
-#define SDE_EVTLOG_DEFAULT_ENABLE SDE_EVTLOG_CRITICAL
+#define SDE_EVTLOG_DEFAULT_ENABLE (SDE_EVTLOG_CRITICAL | SDE_EVTLOG_IRQ)
 #else
 #define SDE_EVTLOG_DEFAULT_ENABLE 0
 #endif
diff --git a/drivers/gpu/msm/a6xx_reg.h b/drivers/gpu/msm/a6xx_reg.h
index 218c6e7..fddfb2c 100644
--- a/drivers/gpu/msm/a6xx_reg.h
+++ b/drivers/gpu/msm/a6xx_reg.h
@@ -687,6 +687,7 @@
 #define A6XX_GMU_RPMH_CTRL			0x1F8E8
 #define A6XX_GMU_RPMH_HYST_CTRL			0x1F8E9
 #define A6XX_GMU_RPMH_POWER_STATE		0x1F8EC
+#define A6XX_GMU_BOOT_KMD_LM_HANDSHAKE		0x1F9F0
 
 /* HFI registers*/
 #define A6XX_GMU_ALWAYS_ON_COUNTER_L		0x1F888
diff --git a/drivers/gpu/msm/adreno_a6xx.c b/drivers/gpu/msm/adreno_a6xx.c
index 49d784c..d278389 100644
--- a/drivers/gpu/msm/adreno_a6xx.c
+++ b/drivers/gpu/msm/adreno_a6xx.c
@@ -648,38 +648,40 @@
 	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	struct gmu_device *gmu = &device->gmu;
 
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC)) {
-		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
-			0x000A0080);
-		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
-			SPTP_ENABLE_MASK);
-		gmu->idle_level = GPU_HW_SPTP_PC;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC)) {
-		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
-			0x000A0080);
-		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
-			IFPC_ENABLE_MASK);
-		gmu->idle_level = GPU_HW_IFPC;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP)) {
-		_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL,
-			HW_NAP_ENABLE_MASK);
-		gmu->idle_level = GPU_HW_NAP;
-	}
-
-	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT)) {
+	/* Configure registers for the idle level; the settings are cumulative */
+	switch (gmu->idle_level) {
+	case GPU_HW_MIN_VOLT:
 		_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, MIN_BW_ENABLE_MASK);
 		_gmu_regrmw(device, A6XX_GMU_RPMH_HYST_CTRL, MIN_BW_HYST);
-		gmu->idle_level = GPU_HW_MIN_VOLT;
+		/* fall through */
+	case GPU_HW_NAP:
+		_gmu_regrmw(device, A6XX_GMU_GPU_NAP_CTRL, HW_NAP_ENABLE_MASK);
+		/* fall through */
+	case GPU_HW_IFPC:
+		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+				0x000A0080);
+		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
+				IFPC_ENABLE_MASK);
+		/* fall through */
+	case GPU_HW_SPTP_PC:
+		kgsl_gmu_regwrite(device, A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+				0x000A0080);
+		_gmu_regrmw(device, A6XX_GMU_PWR_COL_INTER_FRAME_CTRL,
+				SPTP_ENABLE_MASK);
+		/* fall through */
+	default:
+		break;
 	}
 
+	/* Limits Management (LM) feature enablement */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_LM))
+		_gmu_regrmw(device, A6XX_GMU_BOOT_KMD_LM_HANDSHAKE, BIT(10));
+
 	/* Enable RPMh GPU client */
 	if (ADRENO_FEATURE(adreno_dev, ADRENO_RPMH))
 		_gmu_regrmw(device, A6XX_GMU_RPMH_CTRL, RPMH_ENABLE_MASK);
 
+	/* Disable reference bandgap voltage */
 	kgsl_gmu_regwrite(device, A6XX_GMU_AO_SPARE_CNTL, 1);
 }
 
@@ -1129,8 +1131,6 @@
 	struct gmu_memdesc *mem_addr = gmu->hfi_mem;
 	int ret, i;
 
-	a6xx_gmu_power_config(device);
-
 	if (boot_state == GMU_COLD_BOOT || boot_state == GMU_RESET) {
 		/* Turn on the HM and SPTP head switches */
 		ret = a6xx_hm_sptprac_control(device, true);
@@ -1175,6 +1175,8 @@
 	kgsl_gmu_regwrite(device, A6XX_GMU_AHB_FENCE_RANGE_0,
 			FENCE_RANGE_MASK);
 
+	/* Configure power control and bring the GMU out of reset */
+	a6xx_gmu_power_config(device);
 	ret = a6xx_gmu_start(device);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index d1c84f1..fa4ca39 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -1775,9 +1775,9 @@
 	/* Commit the pointer to the context in context_idr */
 	write_lock(&device->context_lock);
 	idr_replace(&device->context_idr, context, context->id);
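+	/*
+	 * Read the id while the lock is held so a racing context destroy
+	 * cannot free the context first.
+	 */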
+	param->drawctxt_id = context->id;
 	write_unlock(&device->context_lock);
 
-	param->drawctxt_id = context->id;
 done:
 	return result;
 }
diff --git a/drivers/gpu/msm/kgsl_gmu.c b/drivers/gpu/msm/kgsl_gmu.c
index 2e9f108..97e4b6f 100644
--- a/drivers/gpu/msm/kgsl_gmu.c
+++ b/drivers/gpu/msm/kgsl_gmu.c
@@ -986,6 +986,7 @@
 	struct gmu_memdesc *mem_addr = NULL;
 	struct kgsl_hfi *hfi = &gmu->hfi;
 	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
 	int i = 0, ret = -ENXIO;
 
 	node = of_find_compatible_node(device->pdev->dev.of_node,
@@ -1086,7 +1087,17 @@
 
 	hfi_init(&gmu->hfi, mem_addr, HFI_QUEUE_SIZE);
 
-	gmu->idle_level = GPU_HW_ACTIVE;
+	/* Set up GMU idle state: pick the lowest-power level the features allow */
+	if (ADRENO_FEATURE(adreno_dev, ADRENO_MIN_VOLT))
+		gmu->idle_level = GPU_HW_MIN_VOLT;
+	else if (ADRENO_FEATURE(adreno_dev, ADRENO_HW_NAP))
+		gmu->idle_level = GPU_HW_NAP;
+	else if (ADRENO_FEATURE(adreno_dev, ADRENO_IFPC))
+		gmu->idle_level = GPU_HW_IFPC;
+	else if (ADRENO_FEATURE(adreno_dev, ADRENO_SPTP_PC))
+		gmu->idle_level = GPU_HW_SPTP_PC;
+	else
+		gmu->idle_level = GPU_HW_ACTIVE;
 
 	return 0;
 
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 3a4474d..94b2e2f9 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -578,27 +578,29 @@
 }
 EXPORT_SYMBOL_GPL(coresight_enable);
 
-void coresight_disable(struct coresight_device *csdev)
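+/* Callers must hold coresight_mutex. */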
+static void __coresight_disable(struct coresight_device *csdev)
 {
 	int  ret;
 
-	mutex_lock(&coresight_mutex);
-
 	ret = coresight_validate_source(csdev, __func__);
 	if (ret)
-		goto out;
+		return;
 
 	if (!csdev->enable)
-		goto out;
+		return;
 
 	if (csdev->node == NULL)
-		goto out;
+		return;
 
 	coresight_disable_source(csdev);
 	coresight_disable_path(csdev->node->path);
 	coresight_release_path(csdev, csdev->node->path);
+}
 
-out:
+void coresight_disable(struct coresight_device *csdev)
+{
+	mutex_lock(&coresight_mutex);
+	__coresight_disable(csdev);
 	mutex_unlock(&coresight_mutex);
 }
 EXPORT_SYMBOL_GPL(coresight_disable);
@@ -904,7 +906,7 @@
 		csdev = coresight_get_source(cspath->path);
 		if (!csdev)
 			continue;
-		coresight_disable(csdev);
+		__coresight_disable(csdev);
 	}
 
 	mutex_unlock(&coresight_mutex);
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index e412230..b521df6 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -38,6 +38,7 @@
 #define FG_ADC_RR_FAKE_BATT_HIGH_MSB		0x5B
 
 #define FG_ADC_RR_BATT_ID_CTRL			0x60
+#define FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV	BIT(0)
 #define FG_ADC_RR_BATT_ID_TRIGGER		0x61
 #define FG_ADC_RR_BATT_ID_TRIGGER_CTL		BIT(0)
 #define FG_ADC_RR_BATT_ID_STS			0x62
@@ -748,6 +749,75 @@
 	return rc;
 }
 
+static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV);
+		if (rc < 0) {
+			pr_err("Enabling BATT ID channel failed:%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_CTRL,
+				FG_ADC_RR_BATT_ID_CTRL_CHANNEL_CONV, 0);
+		if (rc < 0) {
+			pr_err("Disabling BATT ID channel failed:%d\n", rc);
+			return rc;
+		}
+	}
+
+	return rc;
+}
+
+static int rradc_do_batt_id_conversion(struct rradc_chip *chip,
+		struct rradc_chan_prop *prop, u16 *data, u8 *buf)
+{
+	int rc = 0, ret = 0;
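+	/*
+	 * Record the first failure in 'ret' but keep walking the disable
+	 * path so the channel is never left enabled on error.
+	 */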
+
+	rc = rradc_enable_batt_id_channel(chip, true);
+	if (rc < 0) {
+		pr_err("Enabling BATT ID channel failed:%d\n", rc);
+		return rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL,
+				FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+	if (rc < 0) {
+		pr_err("BATT_ID trigger set failed:%d\n", rc);
+		ret = rc;
+		rc = rradc_enable_batt_id_channel(chip, false);
+		if (rc < 0)
+			pr_err("Disabling BATT ID channel failed:%d\n", rc);
+		return ret;
+	}
+
+	rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
+	if (rc < 0) {
+		pr_err("Error reading in continuous mode:%d\n", rc);
+		ret = rc;
+	}
+
+	rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
+			FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
+	if (rc < 0) {
+		pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+		ret = rc;
+	}
+
+	rc = rradc_enable_batt_id_channel(chip, false);
+	if (rc < 0) {
+		pr_err("Disabling BATT ID channel failed:%d\n", rc);
+		ret = rc;
+	}
+
+	return ret;
+}
+
 static int rradc_do_conversion(struct rradc_chip *chip,
 			struct rradc_chan_prop *prop, u16 *data)
 {
@@ -760,24 +830,9 @@
 
 	switch (prop->channel) {
 	case RR_ADC_BATT_ID:
-		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
-				FG_ADC_RR_BATT_ID_TRIGGER_CTL,
-				FG_ADC_RR_BATT_ID_TRIGGER_CTL);
+		rc = rradc_do_batt_id_conversion(chip, prop, data, buf);
 		if (rc < 0) {
-			pr_err("BATT_ID trigger set failed:%d\n", rc);
-			goto fail;
-		}
-
-		rc = rradc_read_channel_with_continuous_mode(chip, prop, buf);
-		if (rc < 0) {
-			pr_err("Error reading in continuous mode:%d\n", rc);
-			goto fail;
-		}
-
-		rc = rradc_masked_write(chip, FG_ADC_RR_BATT_ID_TRIGGER,
-				FG_ADC_RR_BATT_ID_TRIGGER_CTL, 0);
-		if (rc < 0) {
-			pr_err("BATT_ID trigger re-set failed:%d\n", rc);
+			pr_err("Battery ID conversion failed:%d\n", rc);
 			goto fail;
 		}
 		break;
diff --git a/drivers/iio/adc/qcom-tadc.c b/drivers/iio/adc/qcom-tadc.c
index 9241288..05b1985 100644
--- a/drivers/iio/adc/qcom-tadc.c
+++ b/drivers/iio/adc/qcom-tadc.c
@@ -18,7 +18,12 @@
 #include <linux/of_irq.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
+#include <linux/power_supply.h>
+#include <linux/pmic-voter.h>
 
+#define USB_PRESENT_VOTER			"USB_PRESENT_VOTER"
+#define SLEEP_VOTER				"SLEEP_VOTER"
+#define SHUTDOWN_VOTER				"SHUTDOWN_VOTER"
 #define TADC_REVISION1_REG			0x00
 #define TADC_REVISION2_REG			0x01
 #define TADC_REVISION3_REG			0x02
@@ -54,6 +59,7 @@
 #define TADC_CH7_ADC_HI_REG(chip)		(chip->tadc_base + 0x73)
 #define TADC_CH8_ADC_LO_REG(chip)		(chip->tadc_base + 0x74)
 #define TADC_CH8_ADC_HI_REG(chip)		(chip->tadc_base + 0x75)
+#define TADC_ADC_DIRECT_TST(chip)		(chip->tadc_base + 0xE7)
 
 /* TADC_CMP register definitions */
 #define TADC_CMP_THR1_CMP_REG(chip)		(chip->tadc_cmp_base + 0x51)
@@ -217,6 +223,12 @@
 	struct tadc_chan_data	chans[TADC_NUM_CH];
 	struct completion	eoc_complete;
 	struct mutex		write_lock;
+	struct mutex		conv_lock;
+	struct power_supply	*usb_psy;
+	struct votable		*tadc_disable_votable;
+	struct work_struct	status_change_work;
+	struct notifier_block	nb;
+	u8			hwtrig_conv;
 };
 
 struct tadc_pt {
@@ -274,7 +286,7 @@
 	if ((reg & 0xFF00) == chip->tadc_cmp_base)
 		return true;
 
-	if (reg == TADC_HWTRIG_CONV_CH_EN_REG(chip))
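+	/* the HW-trigger reg and everything above it (e.g. direct test) is secured */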
+	if (reg >= TADC_HWTRIG_CONV_CH_EN_REG(chip))
 		return true;
 
 	return false;
@@ -345,6 +357,26 @@
 	return rc;
 }
 
+static int tadc_masked_write(struct tadc_chip *chip, u16 reg, u8 mask, u8 data)
+{
+	int rc = 0;
+
+	mutex_lock(&chip->write_lock);
+	if (tadc_is_reg_locked(chip, reg)) {
+		rc = regmap_write(chip->regmap, (reg & 0xFF00) | 0xD0, 0xA5);
+		if (rc < 0) {
+			pr_err("Couldn't unlock secure register rc=%d\n", rc);
+			goto unlock;
+		}
+	}
+
+	rc = regmap_update_bits(chip->regmap, reg, mask, data);
+
+unlock:
+	mutex_unlock(&chip->write_lock);
+	return rc;
+}
+
 static int tadc_lerp(const struct tadc_pt *pts, size_t size, bool inv,
 							s32 input, s32 *output)
 {
@@ -480,12 +512,22 @@
 {
 	unsigned long timeout, timeleft;
 	u8 val[TADC_NUM_CH * 2];
-	int rc, i;
+	int rc = 0, i;
 
+	mutex_lock(&chip->conv_lock);
 	rc = tadc_read(chip, TADC_MBG_ERR_REG(chip), val, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read mbg error status rc=%d\n", rc);
-		return rc;
+		goto unlock;
+	}
+
+	reinit_completion(&chip->eoc_complete);
+
+	if (get_effective_result(chip->tadc_disable_votable)) {
+		/* leave the completion in the completed state */
+		complete_all(&chip->eoc_complete);
+		rc = -ENODATA;
+		goto unlock;
 	}
 
 	if (val[0] != 0) {
@@ -496,7 +538,7 @@
 	rc = tadc_write(chip, TADC_CONV_REQ_REG(chip), channels);
 	if (rc < 0) {
 		pr_err("Couldn't write conversion request rc=%d\n", rc);
-		return rc;
+		goto unlock;
 	}
 
 	timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
@@ -506,25 +548,34 @@
 		rc = tadc_read(chip, TADC_SW_CH_CONV_REG(chip), val, 1);
 		if (rc < 0) {
 			pr_err("Couldn't read conversion status rc=%d\n", rc);
-			return rc;
+			goto unlock;
 		}
 
+		/*
+		 * check one last time if the channel we are requesting
+		 * has completed conversion
+		 */
 		if (val[0] != channels) {
-			pr_err("Conversion timed out\n");
-			return -ETIMEDOUT;
+			rc = -ETIMEDOUT;
+			goto unlock;
 		}
 	}
 
 	rc = tadc_read(chip, TADC_CH1_ADC_LO_REG(chip), val, ARRAY_SIZE(val));
 	if (rc < 0) {
 		pr_err("Couldn't read adc channels rc=%d\n", rc);
-		return rc;
+		goto unlock;
 	}
 
 	for (i = 0; i < TADC_NUM_CH; i++)
 		adc[i] = (s16)(val[i * 2] | (u16)val[i * 2 + 1] << 8);
 
-	return jiffies_to_msecs(timeout - timeleft);
+	pr_debug("Conversion time for channels 0x%x = %dms\n", channels,
+			jiffies_to_msecs(timeout - timeleft));
+
+unlock:
+	mutex_unlock(&chip->conv_lock);
+	return rc;
 }
 
 static int tadc_read_raw(struct iio_dev *indio_dev,
@@ -593,12 +644,17 @@
 			break;
 		default:
 			rc = tadc_do_conversion(chip, BIT(chan->channel), adc);
-			if (rc >= 0)
-				*val = adc[chan->channel];
+			if (rc < 0) {
+				if (rc != -ENODATA)
+					pr_err("Couldn't read battery current and voltage channels rc=%d\n",
+									rc);
+				return rc;
+			}
+			*val = adc[chan->channel];
 			break;
 		}
 
-		if (rc < 0) {
+		if (rc < 0 && rc != -ENODATA) {
 			pr_err("Couldn't read channel %d\n", chan->channel);
 			return rc;
 		}
@@ -630,7 +686,7 @@
 		case TADC_BATT_P:
 			rc = tadc_do_conversion(chip,
 				BIT(TADC_BATT_I) | BIT(TADC_BATT_V), adc);
-			if (rc < 0) {
+			if (rc < 0 && rc != -ENODATA) {
 				pr_err("Couldn't read battery current and voltage channels rc=%d\n",
 									rc);
 				return rc;
@@ -641,7 +697,7 @@
 		case TADC_INPUT_P:
 			rc = tadc_do_conversion(chip,
 				BIT(TADC_INPUT_I) | BIT(TADC_INPUT_V), adc);
-			if (rc < 0) {
+			if (rc < 0 && rc != -ENODATA) {
 				pr_err("Couldn't read input current and voltage channels rc=%d\n",
 									rc);
 				return rc;
@@ -683,6 +739,7 @@
 		case TADC_DIE_TEMP:
 		case TADC_DIE_TEMP_THR1:
 		case TADC_DIE_TEMP_THR2:
+		case TADC_DIE_TEMP_THR3:
 			*val = chan_data->scale;
 			return IIO_VAL_INT;
 		case TADC_BATT_I:
@@ -821,15 +878,137 @@
 	return 0;
 }
 
-
 static irqreturn_t handle_eoc(int irq, void *dev_id)
 {
 	struct tadc_chip *chip = dev_id;
 
-	complete(&chip->eoc_complete);
+	complete_all(&chip->eoc_complete);
 	return IRQ_HANDLED;
 }
 
+static int tadc_disable_vote_callback(struct votable *votable,
+			void *data, int disable, const char *client)
+{
+	struct tadc_chip *chip = data;
+	int rc;
+	int timeout;
+	unsigned long timeleft;
+
+	if (disable) {
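+		/*
+		 * Let any in-flight conversion finish before parking the
+		 * ADC in direct test mode.
+		 */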
+		timeout = msecs_to_jiffies(CONVERSION_TIMEOUT_MS);
+		timeleft = wait_for_completion_timeout(&chip->eoc_complete,
+				timeout);
+		if (timeleft == 0)
+			pr_err("Timed out waiting for eoc, disabling hw conversions regardless\n");
+
+		rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							&chip->hwtrig_conv, 1);
+		if (rc < 0) {
+			pr_err("Couldn't save hw conversions rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip), 0x00);
+		if (rc < 0) {
+			pr_err("Couldn't disable hw conversions rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x80);
+		if (rc < 0) {
+			pr_err("Couldn't enable direct test mode rc=%d\n", rc);
+			return rc;
+		}
+	} else {
+		rc = tadc_write(chip, TADC_ADC_DIRECT_TST(chip), 0x00);
+		if (rc < 0) {
+			pr_err("Couldn't disable direct test mode rc=%d\n", rc);
+			return rc;
+		}
+		rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							chip->hwtrig_conv);
+		if (rc < 0) {
+			pr_err("Couldn't restore hw conversions rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	pr_debug("client: %s disable: %d\n", client, disable);
+	return 0;
+}
+
+static void status_change_work(struct work_struct *work)
+{
+	struct tadc_chip *chip = container_of(work,
+			struct tadc_chip, status_change_work);
+	union power_supply_propval pval = {0, };
+	int rc;
+
+	if (!chip->usb_psy)
+		chip->usb_psy = power_supply_get_by_name("usb");
+
+	if (!chip->usb_psy) {
+		/* treat usb as not present */
+		vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+		return;
+	}
+
+	rc = power_supply_get_property(chip->usb_psy,
+		       POWER_SUPPLY_PROP_PRESENT, &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get present status rc=%d\n", rc);
+		/* treat usb as not present */
+		vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+		return;
+	}
+
+	/* disable if usb is not present */
+	vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, !pval.intval, 0);
+}
+
+static int tadc_notifier_call(struct notifier_block *nb,
+		unsigned long ev, void *v)
+{
+	struct power_supply *psy = v;
+	struct tadc_chip *chip = container_of(nb, struct tadc_chip, nb);
+
+	if (ev != PSY_EVENT_PROP_CHANGED)
+		return NOTIFY_OK;
+
+	if ((strcmp(psy->desc->name, "usb") == 0))
+		schedule_work(&chip->status_change_work);
+
+	return NOTIFY_OK;
+}
+
+static int tadc_register_notifier(struct tadc_chip *chip)
+{
+	int rc;
+
+	chip->nb.notifier_call = tadc_notifier_call;
+	rc = power_supply_reg_notifier(&chip->nb);
+	if (rc < 0) {
+		pr_err("Couldn't register psy notifier rc = %d\n", rc);
+		return rc;
+	}
+
+	return 0;
+}
+
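+/* Park the ADC across system sleep via the disable votable */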
+static int tadc_suspend(struct device *dev)
+{
+	struct tadc_chip *chip = dev_get_drvdata(dev);
+
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, true, 0);
+	return 0;
+}
+
+static int tadc_resume(struct device *dev)
+{
+	struct tadc_chip *chip = dev_get_drvdata(dev);
+
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+	return 0;
+}
+
 static int tadc_set_therm_table(struct tadc_chan_data *chan_data, u32 beta,
 				u32 rtherm)
 {
@@ -975,16 +1154,23 @@
 		return rc;
 	}
 
-	/* enable all temperature hardware triggers */
-	rc = tadc_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
-							BIT(TADC_THERM1) |
-							BIT(TADC_THERM2) |
-							BIT(TADC_DIE_TEMP));
+	/* enable connector and die temp hardware triggers */
+	rc = tadc_masked_write(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+					BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP),
+					BIT(TADC_THERM2) | BIT(TADC_DIE_TEMP));
 	if (rc < 0) {
 		pr_err("Couldn't enable hardware triggers rc=%d\n", rc);
 		return rc;
 	}
 
+	/* save hw triggered conversion configuration */
+	rc = tadc_read(chip, TADC_HWTRIG_CONV_CH_EN_REG(chip),
+							&chip->hwtrig_conv, 1);
+	if (rc < 0) {
+		pr_err("Couldn't save hw conversions rc=%d\n", rc);
+		return rc;
+	}
+
 	return 0;
 }
 
@@ -1009,6 +1195,12 @@
 	chip->dev = &pdev->dev;
 	init_completion(&chip->eoc_complete);
 
+	/*
+	 * Start the completion in the "completed" state so that a disable
+	 * of the tadc can make progress
+	 */
+	complete_all(&chip->eoc_complete);
+
 	rc = of_property_read_u32(node, "reg", &chip->tadc_base);
 	if (rc < 0) {
 		pr_err("Couldn't read base address rc=%d\n", rc);
@@ -1017,6 +1209,8 @@
 	chip->tadc_cmp_base = chip->tadc_base + 0x100;
 
 	mutex_init(&chip->write_lock);
+	mutex_init(&chip->conv_lock);
+	INIT_WORK(&chip->status_change_work, status_change_work);
 	chip->regmap = dev_get_regmap(chip->dev->parent, NULL);
 	if (!chip->regmap) {
 		pr_err("Couldn't get regmap\n");
@@ -1035,17 +1229,36 @@
 		return rc;
 	}
 
+	chip->tadc_disable_votable = create_votable("SMB_TADC_DISABLE",
+					VOTE_SET_ANY,
+					tadc_disable_vote_callback,
+					chip);
+	if (IS_ERR(chip->tadc_disable_votable)) {
+		rc = PTR_ERR(chip->tadc_disable_votable);
+		return rc;
+	}
+	/* assume usb is not present */
+	vote(chip->tadc_disable_votable, USB_PRESENT_VOTER, true, 0);
+	vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, false, 0);
+	vote(chip->tadc_disable_votable, SLEEP_VOTER, false, 0);
+
+	rc = tadc_register_notifier(chip);
+	if (rc < 0) {
+		pr_err("Couldn't register notifier rc=%d\n", rc);
+		goto destroy_votable;
+	}
+
 	irq = of_irq_get_byname(node, "eoc");
 	if (irq < 0) {
 		pr_err("Couldn't get eoc irq rc=%d\n", irq);
-		return irq;
+		goto destroy_votable;
 	}
 
 	rc = devm_request_threaded_irq(chip->dev, irq, NULL, handle_eoc,
 						IRQF_ONESHOT, "eoc", chip);
 	if (rc < 0) {
 		pr_err("Couldn't request irq %d rc=%d\n", irq, rc);
-		return rc;
+		goto destroy_votable;
 	}
 
 	indio_dev->dev.parent = chip->dev;
@@ -1058,17 +1271,37 @@
 	rc = devm_iio_device_register(chip->dev, indio_dev);
 	if (rc < 0) {
 		pr_err("Couldn't register IIO device rc=%d\n", rc);
-		return rc;
+		goto destroy_votable;
 	}
 
+	platform_set_drvdata(pdev, chip);
 	return 0;
+
+destroy_votable:
+	destroy_votable(chip->tadc_disable_votable);
+	return rc;
 }
 
 static int tadc_remove(struct platform_device *pdev)
 {
+	struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+	destroy_votable(chip->tadc_disable_votable);
 	return 0;
 }
 
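+/* vote to disable conversions at shutdown; this vote is never removed */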
+static void tadc_shutdown(struct platform_device *pdev)
+{
+	struct tadc_chip *chip = platform_get_drvdata(pdev);
+
+	vote(chip->tadc_disable_votable, SHUTDOWN_VOTER, true, 0);
+}
+
+static const struct dev_pm_ops tadc_pm_ops = {
+	.resume		= tadc_resume,
+	.suspend	= tadc_suspend,
+};
+
 static const struct of_device_id tadc_match_table[] = {
 	{ .compatible = "qcom,tadc" },
 	{ }
@@ -1076,12 +1309,14 @@
 MODULE_DEVICE_TABLE(of, tadc_match_table);
 
 static struct platform_driver tadc_driver = {
-	.driver	= {
+	.driver		= {
 		.name		= "qcom-tadc",
 		.of_match_table	= tadc_match_table,
+		.pm		= &tadc_pm_ops,
 	},
-	.probe	= tadc_probe,
-	.remove	= tadc_remove,
+	.probe		= tadc_probe,
+	.remove		= tadc_remove,
+	.shutdown	= tadc_shutdown,
 };
 module_platform_driver(tadc_driver);
 
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.c b/drivers/media/platform/msm/vidc/hfi_packetization.c
index 87a4ac8..709f1d8 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.c
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.c
@@ -1297,18 +1297,6 @@
 			sizeof(struct hfi_quantization_range);
 		break;
 	}
-	case HAL_PARAM_VENC_MAX_NUM_B_FRAMES:
-	{
-		struct hfi_max_num_b_frames *hfi;
-
-		pkt->rg_property_data[0] =
-			HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES;
-		hfi = (struct hfi_max_num_b_frames *) &pkt->rg_property_data[1];
-		memcpy(hfi, (struct hfi_max_num_b_frames *) pdata,
-				sizeof(struct hfi_max_num_b_frames));
-		pkt->size += sizeof(u32) + sizeof(struct hfi_max_num_b_frames);
-		break;
-	}
 	case HAL_CONFIG_VENC_INTRA_PERIOD:
 	{
 		struct hfi_intra_period *hfi;
diff --git a/drivers/media/platform/msm/vidc/hfi_packetization.h b/drivers/media/platform/msm/vidc/hfi_packetization.h
index e0def0f..06c0574 100644
--- a/drivers/media/platform/msm/vidc/hfi_packetization.h
+++ b/drivers/media/platform/msm/vidc/hfi_packetization.h
@@ -18,9 +18,9 @@
 #include "vidc_hfi.h"
 #include "vidc_hfi_api.h"
 
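+/* use C99 __VA_ARGS__ in place of the GNU named variadic argument extension */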
-#define call_hfi_pkt_op(q, op, args...)			\
+#define call_hfi_pkt_op(q, op, ...)			\
 	(((q) && (q)->pkt_ops && (q)->pkt_ops->op) ?	\
-	((q)->pkt_ops->op(args)) : 0)
+	((q)->pkt_ops->op(__VA_ARGS__)) : 0)
 
 enum hfi_packetization_type {
 	HFI_PACKETIZATION_4XX,
diff --git a/drivers/media/platform/msm/vidc/hfi_response_handler.c b/drivers/media/platform/msm/vidc/hfi_response_handler.c
index 00830cc..3378ff0 100644
--- a/drivers/media/platform/msm/vidc/hfi_response_handler.c
+++ b/drivers/media/platform/msm/vidc/hfi_response_handler.c
@@ -241,10 +241,8 @@
 		} while (num_properties_changed > 0);
 	}
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_EVENT_CHANGE,
-		.response.event = event_notify,
-	};
+	info->response_type = HAL_SESSION_EVENT_CHANGE;
+	info->response.event = event_notify;
 
 	return 0;
 }
@@ -275,10 +273,8 @@
 	event_notify.packet_buffer = data->packet_buffer;
 	event_notify.extra_data_buffer = data->extra_data_buffer;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_EVENT_CHANGE,
-		.response.event = event_notify,
-	};
+	info->response_type = HAL_SESSION_EVENT_CHANGE;
+	info->response.event = event_notify;
 
 	return 0;
 }
@@ -289,10 +285,8 @@
 
 	cmd_done.device_id = device_id;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SYS_ERROR,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SYS_ERROR;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -315,17 +309,13 @@
 	case HFI_ERR_SESSION_UPSCALE_NOT_SUPPORTED:
 		cmd_done.status = VIDC_ERR_NONE;
 		dprintk(VIDC_INFO, "Non Fatal: HFI_EVENT_SESSION_ERROR\n");
-		*info = (struct msm_vidc_cb_info) {
-			.response_type =  HAL_RESPONSE_UNUSED,
-			.response.cmd = cmd_done,
-		};
+		info->response_type = HAL_RESPONSE_UNUSED;
+		info->response.cmd = cmd_done;
 		return 0;
 	default:
 		dprintk(VIDC_ERR, "HFI_EVENT_SESSION_ERROR\n");
-		*info = (struct msm_vidc_cb_info) {
-			.response_type =  HAL_SESSION_ERROR,
-			.response.cmd = cmd_done,
-		};
+		info->response_type = HAL_SESSION_ERROR;
+		info->response.cmd = cmd_done;
 		return 0;
 	}
 }
@@ -403,10 +393,10 @@
 	cmd_done.session_id = NULL;
 	cmd_done.status = (u32)status;
 	cmd_done.size = sizeof(struct vidc_hal_sys_init_done);
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SYS_INIT_DONE,
-		.response.cmd = cmd_done,
-	};
+
+	info->response_type = HAL_SYS_INIT_DONE;
+	info->response.cmd = cmd_done;
+
 	return 0;
 }
 
@@ -433,10 +423,8 @@
 	cmd_done.status = (u32) status;
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SYS_RELEASE_RESOURCE_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SYS_RELEASE_RESOURCE_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1162,10 +1150,8 @@
 		cmd_done.data.property.buf_req = buff_req;
 		cmd_done.size = sizeof(buff_req);
 
-		*info = (struct msm_vidc_cb_info) {
-			.response_type =  HAL_SESSION_PROPERTY_INFO,
-			.response.cmd = cmd_done,
-		};
+		info->response_type = HAL_SESSION_PROPERTY_INFO;
+		info->response.cmd = cmd_done;
 
 		return 0;
 	default:
@@ -1197,10 +1183,8 @@
 	cmd_done.data.session_init_done = session_init_done;
 	cmd_done.size = sizeof(struct vidc_hal_session_init_done);
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_INIT_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_INIT_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1227,10 +1211,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_LOAD_RESOURCE_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_LOAD_RESOURCE_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1272,10 +1254,8 @@
 		return -EINVAL;
 	}
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_FLUSH_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_FLUSH_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1323,10 +1303,8 @@
 		(u32)pkt->packet_buffer, -1, -1,
 		pkt->filled_len, pkt->offset);
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_ETB_DONE,
-		.response.data = data_done,
-	};
+	info->response_type = HAL_SESSION_ETB_DONE;
+	info->response.data = data_done;
 
 	return 0;
 }
@@ -1450,10 +1428,8 @@
 		data_done.output_done.filled_len1,
 		data_done.output_done.offset1);
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_FTB_DONE,
-		.response.data = data_done,
-	};
+	info->response_type = HAL_SESSION_FTB_DONE;
+	info->response.data = data_done;
 
 	return 0;
 }
@@ -1479,10 +1455,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_START_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_START_DONE;
+	info->response.cmd = cmd_done;
 	return 0;
 }
 
@@ -1507,10 +1481,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_STOP_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_STOP_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1536,10 +1508,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_RELEASE_RESOURCE_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_RELEASE_RESOURCE_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1571,10 +1541,8 @@
 		dprintk(VIDC_ERR, "invalid payload in rel_buff_done\n");
 	}
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_RELEASE_BUFFER_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_RELEASE_BUFFER_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1598,10 +1566,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_END_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_END_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
@@ -1626,10 +1592,8 @@
 	cmd_done.status = hfi_map_err_status(pkt->error_type);
 	cmd_done.size = 0;
 
-	*info = (struct msm_vidc_cb_info) {
-		.response_type =  HAL_SESSION_ABORT_DONE,
-		.response.cmd = cmd_done,
-	};
+	info->response_type = HAL_SESSION_ABORT_DONE;
+	info->response.cmd = cmd_done;
 
 	return 0;
 }
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 13cc1b2..12f4c54 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1093,7 +1093,7 @@
 	if (inst->fmts[CAPTURE_PORT].fourcc != V4L2_PIX_FMT_VP8)
 		return 0;
 
-	num_enh_layers = layers ? : 0;
+	num_enh_layers = layers ? layers : 0;
 	dprintk(VIDC_DBG, "%s Hier-P in firmware\n",
 			num_enh_layers ? "Enable" : "Disable");
 
@@ -1244,7 +1244,6 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES:
 	{
 		int num_p, num_b;
-		u32 max_num_b_frames;
 
 		temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES);
 		num_b = temp_ctrl->val;
@@ -1257,34 +1256,10 @@
 		else if (ctrl->id == V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES)
 			num_b = ctrl->val;
 
-		max_num_b_frames = num_b ? MAX_NUM_B_FRAMES : 0;
-		property_id = HAL_PARAM_VENC_MAX_NUM_B_FRAMES;
-		pdata = &max_num_b_frames;
-		rc = call_hfi_op(hdev, session_set_property,
-			(void *)inst->session, property_id, pdata);
-		if (rc) {
-			dprintk(VIDC_ERR,
-				"Failed : Setprop MAX_NUM_B_FRAMES %d\n",
-				rc);
-			break;
-		}
-
 		property_id = HAL_CONFIG_VENC_INTRA_PERIOD;
 		intra_period.pframes = num_p;
 		intra_period.bframes = num_b;
 
-		/*
-		 *Incase firmware does not have B-Frame support,
-		 *offload the b-frame count to p-frame to make up
-		 *for the requested Intraperiod
-		 */
-		if (!inst->capability.bframe.max) {
-			intra_period.pframes = num_p + num_b;
-			intra_period.bframes = 0;
-			dprintk(VIDC_DBG,
-				"No bframe support, changing pframe from %d to %d\n",
-				num_p, intra_period.pframes);
-		}
 		pdata = &intra_period;
 		break;
 	}
@@ -1297,7 +1272,10 @@
 	case V4L2_CID_MPEG_VIDEO_BITRATE_MODE:
 	{
 		int final_mode = 0;
-		struct v4l2_ctrl update_ctrl = {.id = 0};
+		struct v4l2_ctrl update_ctrl;
+
+		update_ctrl.id = 0;
+		update_ctrl.val = 0;
 
 		/* V4L2_CID_MPEG_VIDEO_BITRATE_MODE and _RATE_CONTROL
 		 * manipulate the same thing.  If one control's state
@@ -1353,7 +1331,7 @@
 	{
 		property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
 		bitrate.bit_rate = ctrl->val;
-		bitrate.layer_id = 0;
+		bitrate.layer_id = MSM_VIDC_ALL_LAYER_ID;
 		pdata = &bitrate;
 		inst->bitrate = ctrl->val;
 		break;
@@ -1976,7 +1954,7 @@
 int msm_venc_s_ext_ctrl(struct msm_vidc_inst *inst,
 	struct v4l2_ext_controls *ctrl)
 {
-	int rc = 0, i, j = 0;
+	int rc = 0, i;
 	struct v4l2_ext_control *control;
 	struct hfi_device *hdev;
 	struct hal_ltr_mode ltr_mode;
@@ -2044,32 +2022,6 @@
 			property_id = HAL_PROPERTY_PARAM_VENC_ASPECT_RATIO;
 			pdata = &sar;
 			break;
-		case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
-		{
-			if (control[i].value) {
-				bitrate.layer_id = i;
-				bitrate.bit_rate = control[i].value;
-				property_id = HAL_CONFIG_VENC_TARGET_BITRATE;
-				pdata = &bitrate;
-				dprintk(VIDC_DBG, "bitrate for layer(%d)=%d\n",
-					i, bitrate.bit_rate);
-				rc = call_hfi_op(hdev, session_set_property,
-					(void *)inst->session, property_id,
-					 pdata);
-				if (rc) {
-					dprintk(VIDC_DBG, "prop %x failed\n",
-						property_id);
-					return rc;
-				}
-				if (i == MAX_HYBRID_HIER_P_LAYERS - 1) {
-					dprintk(VIDC_DBG, "HAL property=%x\n",
-						property_id);
-					property_id = 0;
-					rc = 0;
-				}
-			}
-			break;
-		}
 		case V4L2_CID_MPEG_VIDC_VIDEO_BLUR_WIDTH:
 			property_id = HAL_CONFIG_VENC_BLUR_RESOLUTION;
 			blur_res.width = control[i].value;
@@ -2084,92 +2036,83 @@
 			pdata = &blur_res;
 			break;
 		case V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID:
-			j = i;
-			layer_id = control[j].value;
-			do {
-				switch (control[j].id) {
-				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
-					qp.qpi = control[j].value;
-					qp.layer_id = layer_id;
-					property_id =
-						HAL_CONFIG_VENC_FRAME_QP;
-					pdata = &qp;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
-					qp.qpp = control[j].value;
-					qp.layer_id = layer_id;
-					property_id =
-						HAL_CONFIG_VENC_FRAME_QP;
-					pdata = &qp;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
-					qp.qpb = control[j].value;
-					qp.layer_id = layer_id;
-					property_id =
-						HAL_CONFIG_VENC_FRAME_QP;
-					pdata = &qp;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
-					qp_range.qpi_min = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
-					qp_range.qpp_min = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
-					qp_range.qpb_min = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
-					qp_range.qpi_max = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
-					qp_range.qpp_max = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
-					qp_range.qpb_max = control[j].value;
-					qp_range.layer_id = layer_id;
-					property_id =
-						HAL_PARAM_VENC_SESSION_QP_RANGE;
-					pdata = &qp_range;
-					break;
-				}
-				j++;
-			} while ((j < ctrl->count) &&
-				control[j].id !=
-					V4L2_CID_MPEG_VIDC_VIDEO_LAYER_ID);
-			if (!rc && property_id) {
-				dprintk(VIDC_DBG, "Control: HAL property=%x\n",
-					 property_id);
-				rc = call_hfi_op(hdev, session_set_property,
-						(void *)inst->session,
-						property_id, pdata);
-				if (rc) {
-					dprintk(VIDC_ERR, "prop %x failed\n",
-						property_id);
-					return rc;
-				}
-				property_id = 0;
+			layer_id = control[i].value;
+			i++;
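+			/* remaining controls in this request apply to layer_id */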
+			while (i < ctrl->count) {
+				switch (control[i].id) {
+				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP:
+					qp.qpi = control[i].value;
+					qp.layer_id = layer_id;
+					property_id =
+						HAL_CONFIG_VENC_FRAME_QP;
+					pdata = &qp;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP:
+					qp.qpp = control[i].value;
+					qp.layer_id = layer_id;
+					property_id =
+						HAL_CONFIG_VENC_FRAME_QP;
+					pdata = &qp;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP:
+					qp.qpb = control[i].value;
+					qp.layer_id = layer_id;
+					property_id =
+						HAL_CONFIG_VENC_FRAME_QP;
+					pdata = &qp;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MIN:
+					qp_range.qpi_min = control[i].value;
+					qp_range.layer_id = layer_id;
+					property_id =
+						HAL_PARAM_VENC_SESSION_QP_RANGE;
+					pdata = &qp_range;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MIN:
+					qp_range.qpp_min = control[i].value;
+					qp_range.layer_id = layer_id;
+					property_id =
+						HAL_PARAM_VENC_SESSION_QP_RANGE;
+					pdata = &qp_range;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MIN:
+					qp_range.qpb_min = control[i].value;
+					qp_range.layer_id = layer_id;
+					property_id =
+						HAL_PARAM_VENC_SESSION_QP_RANGE;
+					pdata = &qp_range;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP_MAX:
+					qp_range.qpi_max = control[i].value;
+					qp_range.layer_id = layer_id;
+					property_id =
+						HAL_PARAM_VENC_SESSION_QP_RANGE;
+					pdata = &qp_range;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_P_FRAME_QP_MAX:
+					qp_range.qpp_max = control[i].value;
+					qp_range.layer_id = layer_id;
+					property_id =
+						HAL_PARAM_VENC_SESSION_QP_RANGE;
+					pdata = &qp_range;
+					break;
+				case V4L2_CID_MPEG_VIDC_VIDEO_B_FRAME_QP_MAX:
+					qp_range.qpb_max = control[i].value;
+					qp_range.layer_id = layer_id;
+					property_id =
+						HAL_PARAM_VENC_SESSION_QP_RANGE;
+					pdata = &qp_range;
+					break;
+				case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
+					bitrate.bit_rate = control[i].value;
+					bitrate.layer_id = layer_id;
+					property_id =
+						HAL_CONFIG_VENC_TARGET_BITRATE;
+					pdata = &bitrate;
+					break;
+				}
-			i = j - 1;
+				i++;
+			}
 			break;
 		default:
 			dprintk(VIDC_ERR, "Invalid id set: %d\n",
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 114a702..cce1a8e 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -150,6 +150,7 @@
 	case V4L2_CID_MPEG_VIDC_VIDEO_HIER_P_NUM_LAYERS:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.hier_p);
 		break;
+	case V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE:
 	case  V4L2_CID_MPEG_VIDEO_BITRATE:
 		msm_vidc_ctrl_get_range(ctrl, &inst->capability.bitrate);
 		break;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 5e49f42..274eed7 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -23,20 +23,9 @@
 #include "msm_vidc_debug.h"
 #include "msm_vidc_clocks.h"
 
-#define IS_ALREADY_IN_STATE(__p, __d) ({\
-	int __rc = (__p >= __d);\
-	__rc; \
-})
-
-#define SUM_ARRAY(__arr, __start, __end) ({\
-		int __index;\
-		typeof((__arr)[0]) __sum = 0;\
-		for (__index = (__start); __index <= (__end); __index++) {\
-			if (__index >= 0 && __index < ARRAY_SIZE(__arr))\
-				__sum += __arr[__index];\
-		} \
-		__sum;\
-})
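+/* instance states are ordered, so "already in state" is a plain comparison */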
+#define IS_ALREADY_IN_STATE(__p, __d) (\
+	(__p >= __d)\
+)
 
 #define V4L2_EVENT_SEQ_CHANGED_SUFFICIENT \
 		V4L2_EVENT_MSM_VIDC_PORT_SETTINGS_CHANGED_SUFFICIENT
@@ -129,7 +118,7 @@
 	};
 
 	rc = msm_comm_g_ctrl(inst, &ctrl);
-	return rc ?: ctrl.value;
+	return rc ? rc : ctrl.value;
 }
 
 static struct v4l2_ctrl **get_super_cluster(struct msm_vidc_inst *inst,
@@ -874,11 +863,13 @@
 
 	/* This should come from sys_init_done */
 	core->resources.max_inst_count =
-		sys_init_msg->max_sessions_supported ? :
+		sys_init_msg->max_sessions_supported ?
+		sys_init_msg->max_sessions_supported :
 		MAX_SUPPORTED_INSTANCES;
 
 	core->resources.max_secure_inst_count =
-		core->resources.max_secure_inst_count ? :
+		core->resources.max_secure_inst_count ?
+		core->resources.max_secure_inst_count :
 		core->resources.max_inst_count;
 
 	if (core->id == MSM_VIDC_CORE_VENUS &&
@@ -1185,6 +1176,9 @@
 			&inst->capability.hier_p);
 	msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE,
 			&inst->capability.bitrate);
+	msm_vidc_comm_update_ctrl(inst,
+			V4L2_CID_MPEG_VIDC_VENC_PARAM_LAYER_BITRATE,
+			&inst->capability.bitrate);
 	msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
 			&inst->capability.peakbitrate);
 	msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDC_VIDEO_I_FRAME_QP,
@@ -3850,17 +3844,17 @@
 	 * Don't queue if:
 	 * 1) Hardware isn't ready (that's simple)
 	 */
-	defer = defer ?: inst->state != MSM_VIDC_START_DONE;
+	defer = defer ? defer : (inst->state != MSM_VIDC_START_DONE);
 
 	/*
 	 * 2) The client explicitly tells us not to because it wants this
 	 * buffer to be batched with future frames.  The batch size (on both
 	 * capabilities) is completely determined by the client.
 	 */
-	defer = defer ?: vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER;
+	defer = defer ? defer : (vbuf && vbuf->flags & V4L2_MSM_BUF_FLAG_DEFER);
 
 	/* 3) If we're in batch mode, we must have full batches of both types */
-	defer = defer ?: batch_mode && (!output_count || !capture_count);
+	defer = defer ? defer :
+		(batch_mode && (!output_count || !capture_count));
 
 	if (defer) {
 		dprintk(VIDC_DBG, "Deferring queue of %pK\n", vb);
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi.h b/drivers/media/platform/msm/vidc/vidc_hfi.h
index 2a833dc..48a6f17 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi.h
@@ -588,6 +588,7 @@
 struct hfi_frame_cr_stats_type {
 	u32 frame_index;
 	struct hfi_ubwc_cr_stats_info_type ubwc_stats_info;
+	u32 complexity_number;
 };
 
 struct hfi_msg_session_empty_buffer_done_packet {
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_api.h b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
index 8aa0bbb..28bb7ab 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_api.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_api.h
@@ -20,17 +20,15 @@
 #include <media/msm_vidc.h>
 #include "msm_vidc_resources.h"
 
-#define CONTAINS(__a, __sz, __t) ({\
-	int __rc = __t >= __a && \
-			__t < __a + __sz; \
-	__rc; \
-})
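+/* true if target __t lies within the region [__a, __a + __sz) */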
+#define CONTAINS(__a, __sz, __t) (\
+	(__t >= __a) && \
+	(__t < __a + __sz) \
+)
 
-#define OVERLAPS(__t, __tsz, __a, __asz) ({\
-	int __rc = __t <= __a && \
-			__t + __tsz >= __a + __asz; \
-	__rc; \
-})
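+/* true if the region [__t, __t + __tsz) fully covers [__a, __a + __asz) */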
+#define OVERLAPS(__t, __tsz, __a, __asz) (\
+	(__t <= __a) && \
+	(__t + __tsz >= __a + __asz) \
+)
 
 #define HAL_BUFFERFLAG_EOS              0x00000001
 #define HAL_BUFFERFLAG_STARTTIME        0x00000002
@@ -191,7 +189,6 @@
 	HAL_CONFIG_VENC_MAX_BITRATE,
 	HAL_PARAM_VENC_H264_VUI_TIMING_INFO,
 	HAL_PARAM_VENC_GENERATE_AUDNAL,
-	HAL_PARAM_VENC_MAX_NUM_B_FRAMES,
 	HAL_PARAM_BUFFER_ALLOC_MODE,
 	HAL_PARAM_VDEC_FRAME_ASSEMBLY,
 	HAL_PARAM_VENC_PRESERVE_TEXT_QUALITY,
@@ -818,10 +815,6 @@
 	u32 time_scale;
 };
 
-struct hal_h264_vui_bitstream_restrc {
-	u32 enable;
-};
-
 struct hal_preserve_text_quality {
 	u32 enable;
 };
@@ -1018,7 +1011,6 @@
 	struct hal_multi_view_select multi_view_select;
 	struct hal_timestamp_scale timestamp_scale;
 	struct hal_h264_vui_timing_info h264_vui_timing_info;
-	struct hal_h264_vui_bitstream_restrc h264_vui_bitstream_restrc;
 	struct hal_preserve_text_quality preserve_text_quality;
 	struct hal_buffer_info buffer_info;
 	struct hal_buffer_alloc_mode buffer_alloc_mode;
diff --git a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
index 0d73410..bc7e8bd 100644
--- a/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
+++ b/drivers/media/platform/msm/vidc/vidc_hfi_helper.h
@@ -276,8 +276,6 @@
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01D)
 #define HFI_PROPERTY_PARAM_VENC_H264_VUI_TIMING_INFO	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x01E)
-#define  HFI_PROPERTY_PARAM_VENC_MAX_NUM_B_FRAMES \
-	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x020)
 #define HFI_PROPERTY_PARAM_VENC_LOW_LATENCY_MODE	\
 	(HFI_PROPERTY_PARAM_VENC_COMMON_START + 0x022)
 #define HFI_PROPERTY_PARAM_VENC_PRESERVE_TEXT_QUALITY \
@@ -465,10 +463,6 @@
 	u32 flip;
 };
 
-struct hfi_max_num_b_frames {
-	u32 max_num_b_frames;
-};
-
 struct hfi_conceal_color {
 	u32 conceal_color;
 };
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index d593315..0ac1cf7 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -479,6 +479,14 @@
 	  the genalloc API. It is supposed to be used for small on-chip SRAM
 	  areas found on many SoCs.
 
+config QSEECOM
+	tristate "QTI Secure Execution Communicator driver"
+	help
+	  Provides a communication interface between userspace and the
+	  QTI Secure Execution Environment (QSEE) using the Secure Channel
+	  Manager (SCM) interface. It exposes APIs for both userspace and
+	  kernel clients.
+
 config VEXPRESS_SYSCFG
 	bool "Versatile Express System Configuration driver"
 	depends on VEXPRESS_CONFIG
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index dd12e9a..e1c6ae1 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -49,6 +49,7 @@
 obj-$(CONFIG_SRAM)		+= sram.o
 obj-y				+= mic/
 obj-$(CONFIG_GENWQE)		+= genwqe/
+obj-$(CONFIG_QSEECOM)		+= qseecom.o
 obj-$(CONFIG_ECHO)		+= echo/
 obj-$(CONFIG_VEXPRESS_SYSCFG)	+= vexpress-syscfg.o
 obj-$(CONFIG_CXL_BASE)		+= cxl/
diff --git a/drivers/misc/compat_qseecom.c b/drivers/misc/compat_qseecom.c
new file mode 100644
index 0000000..96d200f
--- /dev/null
+++ b/drivers/misc/compat_qseecom.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/qseecom.h>
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+static int compat_get_qseecom_register_listener_req(
+		struct compat_qseecom_register_listener_req __user *data32,
+		struct qseecom_register_listener_req __user *data)
+{
+	int err;
+	compat_ulong_t listener_id;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_size;
+
+	err = get_user(listener_id, &data32->listener_id);
+	err |= put_user(listener_id, &data->listener_id);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	/* upper bits won't get set, zero them */
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+
+	err |= get_user(sb_size, &data32->sb_size);
+	err |= put_user(sb_size, &data->sb_size);
+	return err;
+}
+
+static int compat_get_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_uint_t app_id;
+
+	err = get_user(mdt_len, &data32->mdt_len);
+	err |= put_user(mdt_len, &data->mdt_len);
+	err |= get_user(img_len, &data32->img_len);
+	err |= put_user(img_len, &data->img_len);
+	err |= get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= copy_in_user(data->img_name, data32->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	return err;
+}
+
+static int compat_get_qseecom_send_cmd_req(
+		struct compat_qseecom_send_cmd_req __user *data32,
+		struct qseecom_send_cmd_req __user *data)
+{
+	int err;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
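+	/* zero the full 64-bit pointer, then store the 32-bit user pointer */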
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_cmd_req(
+		struct compat_qseecom_send_modfd_cmd_req __user *data32,
+		struct qseecom_send_modfd_cmd_req __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+static int compat_get_qseecom_set_sb_mem_param_req(
+		struct compat_qseecom_set_sb_mem_param_req __user *data32,
+		struct qseecom_set_sb_mem_param_req __user *data)
+{
+	int err;
+	compat_long_t ifd_data_fd;
+	compat_uptr_t virt_sb_base;
+	compat_ulong_t sb_len;
+
+	err = get_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= get_user(virt_sb_base, &data32->virt_sb_base);
+	err |= put_user(NULL, &data->virt_sb_base);
+	err |= put_user(virt_sb_base, (compat_uptr_t *)&data->virt_sb_base);
+	err |= get_user(sb_len, &data32->sb_len);
+	err |= put_user(sb_len, &data->sb_len);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	int err;
+	compat_uint_t qseos_version;
+
+	err = get_user(qseos_version, &data32->qseos_version);
+	err |= put_user(qseos_version, &data->qseos_version);
+	return err;
+}
+
+static int compat_get_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_uint_t app_id;
+	char app_name;
+	compat_ulong_t app_arch;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data32->app_name[i]));
+		err |= put_user(app_name, &(data->app_name[i]));
+	}
+	err |= get_user(app_id, &data32->app_id);
+	err |= put_user(app_id, &data->app_id);
+	err |= get_user(app_arch, &data32->app_arch);
+	err |= put_user(app_arch, &data->app_arch);
+	return err;
+}
+
+static int compat_get_qseecom_send_svc_cmd_req(
+		struct compat_qseecom_send_svc_cmd_req __user *data32,
+		struct qseecom_send_svc_cmd_req __user *data)
+{
+	int err;
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf;
+	compat_uint_t cmd_req_len;
+	compat_uptr_t resp_buf;
+	compat_uint_t resp_len;
+
+	err = get_user(cmd_id, &data32->cmd_id);
+	err |= put_user(cmd_id, &data->cmd_id);
+	err |= get_user(cmd_req_buf, &data32->cmd_req_buf);
+	err |= put_user(NULL, &data->cmd_req_buf);
+	err |= put_user(cmd_req_buf, (compat_uptr_t *)&data->cmd_req_buf);
+	err |= get_user(cmd_req_len, &data32->cmd_req_len);
+	err |= put_user(cmd_req_len, &data->cmd_req_len);
+	err |= get_user(resp_buf, &data32->resp_buf);
+	err |= put_user(NULL, &data->resp_buf);
+	err |= put_user(resp_buf, (compat_uptr_t *)&data->resp_buf);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_create_key_req(
+		struct compat_qseecom_create_key_req __user *data32,
+		struct qseecom_create_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->hash32, data32->hash32, QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_wipe_key_req(
+		struct compat_qseecom_wipe_key_req __user *data32,
+		struct qseecom_wipe_key_req __user *data)
+{
+	int err;
+	compat_uint_t usage;
+	compat_int_t wipe_key_flag;
+
+	err = get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+	err |= get_user(wipe_key_flag, &data32->wipe_key_flag);
+	err |= put_user(wipe_key_flag, &data->wipe_key_flag);
+
+	return err;
+}
+
+static int compat_get_qseecom_update_key_userinfo_req(
+		struct compat_qseecom_update_key_userinfo_req __user *data32,
+		struct qseecom_update_key_userinfo_req __user *data)
+{
+	int err = 0;
+	compat_uint_t usage;
+
+	err = copy_in_user(data->current_hash32, data32->current_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= copy_in_user(data->new_hash32, data32->new_hash32,
+				QSEECOM_HASH_SIZE);
+	err |= get_user(usage, &data32->usage);
+	err |= put_user(usage, &data->usage);
+
+	return err;
+}
+
+static int compat_get_qseecom_save_partition_hash_req(
+		struct compat_qseecom_save_partition_hash_req __user *data32,
+		struct qseecom_save_partition_hash_req __user *data)
+{
+	int err;
+	compat_int_t partition_id;
+
+	err = get_user(partition_id, &data32->partition_id);
+	err |= put_user(partition_id, &data->partition_id);
+	err |= copy_in_user(data->digest, data32->digest,
+				SHA256_DIGEST_LENGTH);
+	return err;
+}
+
+static int compat_get_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data32->is_activated);
+	err |= put_user(is_activated, &data->is_activated);
+	return err;
+}
+
+static int compat_get_qseecom_mdtp_cipher_dip_req(
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32,
+		struct qseecom_mdtp_cipher_dip_req __user *data)
+{
+	int err;
+	compat_int_t in_buf_size;
+	compat_uptr_t in_buf;
+	compat_int_t out_buf_size;
+	compat_uptr_t out_buf;
+	compat_int_t direction;
+
+	err = get_user(in_buf_size, &data32->in_buf_size);
+	err |= put_user(in_buf_size, &data->in_buf_size);
+	err |= get_user(out_buf_size, &data32->out_buf_size);
+	err |= put_user(out_buf_size, &data->out_buf_size);
+	err |= get_user(direction, &data32->direction);
+	err |= put_user(direction, &data->direction);
+	err |= get_user(in_buf, &data32->in_buf);
+	err |= put_user(NULL, &data->in_buf);
+	err |= put_user(in_buf, (compat_uptr_t *)&data->in_buf);
+	err |= get_user(out_buf, &data32->out_buf);
+	err |= put_user(NULL, &data->out_buf);
+	err |= put_user(out_buf, (compat_uptr_t *)&data->out_buf);
+
+	return err;
+}
+
+static int compat_get_qseecom_send_modfd_listener_resp(
+		struct compat_qseecom_send_modfd_listener_resp __user *data32,
+		struct qseecom_send_modfd_listener_resp __user *data)
+{
+	int err;
+	unsigned int i;
+	compat_uptr_t resp_buf_ptr;
+	compat_uint_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+
+	err = get_user(resp_buf_ptr, &data32->resp_buf_ptr);
+	err |= put_user(NULL, &data->resp_buf_ptr);
+	err |= put_user(resp_buf_ptr, (compat_uptr_t *)&data->resp_buf_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
+
+static int compat_get_qseecom_qteec_req(
+		struct compat_qseecom_qteec_req __user *data32,
+		struct qseecom_qteec_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	int err;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+	return err;
+}
+
+static int compat_get_qseecom_qteec_modfd_req(
+		struct compat_qseecom_qteec_modfd_req __user *data32,
+		struct qseecom_qteec_modfd_req __user *data)
+{
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+	int err, i;
+
+	err = get_user(req_ptr, &data32->req_ptr);
+	err |= put_user(NULL, &data->req_ptr);
+	err |= put_user(req_ptr, (compat_uptr_t *)&data->req_ptr);
+	err |= get_user(req_len, &data32->req_len);
+	err |= put_user(req_len, &data->req_len);
+
+	err |= get_user(resp_ptr, &data32->resp_ptr);
+	err |= put_user(NULL, &data->resp_ptr);
+	err |= put_user(resp_ptr, (compat_uptr_t *)&data->resp_ptr);
+	err |= get_user(resp_len, &data32->resp_len);
+	err |= put_user(resp_len, &data->resp_len);
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		err |= get_user(fd, &data32->ifd_data[i].fd);
+		err |= put_user(fd, &data->ifd_data[i].fd);
+		err |= get_user(cmd_buf_offset,
+				&data32->ifd_data[i].cmd_buf_offset);
+		err |= put_user(cmd_buf_offset,
+				&data->ifd_data[i].cmd_buf_offset);
+	}
+	return err;
+}
+
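+/* copy a single int between the 32-bit and native user buffers */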
+static int compat_get_int(compat_int_t __user *data32,
+		int __user *data)
+{
+	compat_int_t x;
+	int err;
+
+	err = get_user(x, data32);
+	err |= put_user(x, data);
+	return err;
+}
+
+static int compat_put_qseecom_load_img_req(
+		struct compat_qseecom_load_img_req __user *data32,
+		struct qseecom_load_img_req __user *data)
+{
+	int err;
+	compat_ulong_t mdt_len;
+	compat_ulong_t img_len;
+	compat_long_t ifd_data_fd;
+	compat_ulong_t app_arch;
+	compat_int_t app_id;
+
+	err = get_user(mdt_len, &data->mdt_len);
+	err |= put_user(mdt_len, &data32->mdt_len);
+	err |= get_user(img_len, &data->img_len);
+	err |= put_user(img_len, &data32->img_len);
+	err |= get_user(ifd_data_fd, &data->ifd_data_fd);
+	err |= put_user(ifd_data_fd, &data32->ifd_data_fd);
+	err |= copy_in_user(data32->img_name, data->img_name,
+				MAX_APP_NAME_SIZE);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_version_req(
+		struct compat_qseecom_qseos_version_req __user *data32,
+		struct qseecom_qseos_version_req __user *data)
+{
+	compat_uint_t qseos_version;
+	int err;
+
+	err = get_user(qseos_version, &data->qseos_version);
+	err |= put_user(qseos_version, &data32->qseos_version);
+	return err;
+}
+
+static int compat_put_qseecom_qseos_app_load_query(
+		struct compat_qseecom_qseos_app_load_query __user *data32,
+		struct qseecom_qseos_app_load_query __user *data)
+{
+	int err = 0;
+	unsigned int i;
+	compat_int_t app_id;
+	compat_ulong_t app_arch;
+	char app_name;
+
+	for (i = 0; i < MAX_APP_NAME_SIZE; i++) {
+		err |= get_user(app_name, &(data->app_name[i]));
+		err |= put_user(app_name, &(data32->app_name[i]));
+	}
+	err |= get_user(app_id, &data->app_id);
+	err |= put_user(app_id, &data32->app_id);
+	err |= get_user(app_arch, &data->app_arch);
+	err |= put_user(app_arch, &data32->app_arch);
+
+	return err;
+}
+
+static int compat_put_qseecom_is_es_activated_req(
+		struct compat_qseecom_is_es_activated_req __user *data32,
+		struct qseecom_is_es_activated_req __user *data)
+{
+	compat_int_t is_activated;
+	int err;
+
+	err = get_user(is_activated, &data->is_activated);
+	err |= put_user(is_activated, &data32->is_activated);
+	return err;
+}
+
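+/*
+ * Map a compat ioctl number to its native equivalent; unknown commands
+ * are passed through unchanged.
+ */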
+static unsigned int convert_cmd(unsigned int cmd)
+{
+	switch (cmd) {
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_REGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+		return QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ:
+		return QSEECOM_IOCTL_LOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+		return QSEECOM_IOCTL_RECEIVE_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+		return QSEECOM_IOCTL_SEND_RESP_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+		return QSEECOM_IOCTL_UNLOAD_APP_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+		return QSEECOM_IOCTL_PERF_ENABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+		return QSEECOM_IOCTL_PERF_DISABLE_REQ;
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ:
+		return QSEECOM_IOCTL_SET_BUS_SCALING_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ:
+		return QSEECOM_IOCTL_SET_MEM_PARAM_REQ;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ:
+		return QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ:
+		return QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ:
+		return QSEECOM_IOCTL_APP_LOADED_QUERY_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ:
+		return QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ:
+		return QSEECOM_IOCTL_CREATE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ:
+		return QSEECOM_IOCTL_WIPE_KEY_REQ;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ:
+		return QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ:
+		return QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ:
+		return QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ:
+		return QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+		return QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ:
+		return QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ:
+		return QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ:
+		return QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64:
+		return QSEECOM_IOCTL_SEND_MODFD_RESP_64;
+
+	default:
+		return cmd;
+	}
+}
+
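+/*
+ * 32-bit ioctl entry point: repack each compat request into its native
+ * layout in compat-allocated user space and forward it to qseecom_ioctl().
+ */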
+long compat_qseecom_ioctl(struct file *file,
+		unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	switch (cmd) {
+
+	case COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ:
+	case COMPAT_QSEECOM_IOCTL_RECEIVE_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ:
+	case COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		return qseecom_ioctl(file, convert_cmd(cmd), 0);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		struct compat_qseecom_register_listener_req __user *data32;
+		struct qseecom_register_listener_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_register_listener_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_load_img_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ: {
+		struct compat_qseecom_send_cmd_req __user *data32;
+		struct qseecom_send_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		struct compat_qseecom_send_modfd_cmd_req __user *data32;
+		struct qseecom_send_modfd_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		struct compat_qseecom_set_sb_mem_param_req __user *data32;
+		struct qseecom_set_sb_mem_param_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_set_sb_mem_param_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		struct compat_qseecom_qseos_version_req __user *data32;
+		struct qseecom_qseos_version_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_version_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_qseos_version_req(data32, data);
+
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		compat_int_t __user *data32;
+		int __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+		err = compat_get_int(data32, data);
+		if (err)
+			return err;
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		struct compat_qseecom_load_img_req __user *data32;
+		struct qseecom_load_img_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_load_img_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		struct compat_qseecom_qseos_app_load_query __user *data32;
+		struct qseecom_qseos_app_load_query __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qseos_app_load_query(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+					(unsigned long)data);
+		err = compat_put_qseecom_qseos_app_load_query(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		struct compat_qseecom_send_svc_cmd_req __user *data32;
+		struct qseecom_send_svc_cmd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_svc_cmd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		struct compat_qseecom_create_key_req __user *data32;
+		struct qseecom_create_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_create_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		struct compat_qseecom_wipe_key_req __user *data32;
+		struct qseecom_wipe_key_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_wipe_key_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		struct compat_qseecom_update_key_userinfo_req __user *data32;
+		struct qseecom_update_key_userinfo_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_update_key_userinfo_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		struct compat_qseecom_save_partition_hash_req __user *data32;
+		struct qseecom_save_partition_hash_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_save_partition_hash_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		struct compat_qseecom_is_es_activated_req __user *data32;
+		struct qseecom_is_es_activated_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_is_es_activated_req(data32, data);
+		if (err)
+			return err;
+
+		ret = qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+		err = compat_put_qseecom_is_es_activated_req(data32, data);
+		return ret ? ret : err;
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		struct compat_qseecom_mdtp_cipher_dip_req __user *data32;
+		struct qseecom_mdtp_cipher_dip_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_mdtp_cipher_dip_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		struct compat_qseecom_send_modfd_listener_resp __user *data32;
+		struct qseecom_send_modfd_listener_resp __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_send_modfd_listener_resp(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		struct compat_qseecom_qteec_req __user *data32;
+		struct qseecom_qteec_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	case COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ:
+	case COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		struct compat_qseecom_qteec_modfd_req __user *data32;
+		struct qseecom_qteec_modfd_req __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (data == NULL)
+			return -EFAULT;
+
+		err = compat_get_qseecom_qteec_modfd_req(data32, data);
+		if (err)
+			return err;
+
+		return qseecom_ioctl(file, convert_cmd(cmd),
+						(unsigned long)data);
+	}
+	break;
+	default:
+		return -ENOIOCTLCMD;
+	break;
+	}
+	return 0;
+}
diff --git a/drivers/misc/compat_qseecom.h b/drivers/misc/compat_qseecom.h
new file mode 100644
index 0000000..fa76d4c
--- /dev/null
+++ b/drivers/misc/compat_qseecom.h
@@ -0,0 +1,333 @@
+#ifndef _UAPI_COMPAT_QSEECOM_H_
+#define _UAPI_COMPAT_QSEECOM_H_
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#if IS_ENABLED(CONFIG_COMPAT)
+#include <linux/compat.h>
+
+/*
+ * struct compat_qseecom_register_listener_req -
+ *      for register listener ioctl request
+ * @listener_id - service id (shared between userspace and QSE)
+ * @ifd_data_fd - ion handle
+ * @virt_sb_base - shared buffer base in user space
+ * @sb_size - shared buffer size
+ */
+struct compat_qseecom_register_listener_req {
+	compat_ulong_t listener_id; /* in */
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_size; /* in */
+};
+
+/*
+ * struct compat_qseecom_send_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ */
+struct compat_qseecom_send_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+/*
+ * struct compat_qseecom_ion_fd_info - ion fd handle data information
+ * @fd - ion handle to some memory allocated in user space
+ * @cmd_buf_offset - command buffer offset
+ */
+struct compat_qseecom_ion_fd_info {
+	compat_long_t fd;
+	compat_ulong_t cmd_buf_offset;
+};
+/*
+ * struct compat_qseecom_send_modfd_cmd_req - for send command ioctl request
+ * @cmd_req_len - command buffer length
+ * @cmd_req_buf - command buffer
+ * @resp_len - response buffer length
+ * @resp_buf - response buffer
+ * @ifd_data - ion fd info (fd and command buffer offset) for memory
+ *             allocated in user space
+ */
+struct compat_qseecom_send_modfd_cmd_req {
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+/*
+ * struct compat_qseecom_send_resp_req -
+ *      signal to continue an incomplete send_cmd request.
+ * Used as a trigger from an HLOS service to notify QSEECOM that it is done
+ * with its operation and to provide the response, so QSEECOM can continue
+ * the incomplete command execution.
+ * @resp_len - Length of the response
+ * @resp_buf - Response buffer where the response of the cmd should go.
+ */
+struct compat_qseecom_send_resp_req {
+	compat_uptr_t resp_buf; /* in */
+	compat_uint_t resp_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_load_img_req -
+ *      for sending image length information and the ion file descriptor
+ * to the qseecom driver. The ion file descriptor is used to retrieve the
+ * ion file handle and, in turn, the physical address of the image location.
+ * @mdt_len - Length of the .mdt file in bytes.
+ * @img_len - Length of the .mdt + .b00 + .. + .bxx image files in bytes.
+ * @ifd_data_fd - Ion file descriptor used when allocating memory.
+ * @img_name - Name of the image.
+ * @app_arch - Architecture of the app image.
+ * @app_id - App id (output).
+ */
+struct compat_qseecom_load_img_req {
+	compat_ulong_t mdt_len; /* in */
+	compat_ulong_t img_len; /* in */
+	compat_long_t  ifd_data_fd; /* in */
+	char	 img_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_ulong_t app_arch; /* in */
+	compat_uint_t app_id; /* out*/
+};
+
+struct compat_qseecom_set_sb_mem_param_req {
+	compat_long_t ifd_data_fd; /* in */
+	compat_uptr_t virt_sb_base; /* in */
+	compat_ulong_t sb_len; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_version_req - get qseos version
+ * @qseos_version - version number
+ */
+struct compat_qseecom_qseos_version_req {
+	compat_uint_t qseos_version; /* in */
+};
+
+/*
+ * struct compat_qseecom_qseos_app_load_query - verify if app is loaded in qsee
+ * @app_name[MAX_APP_NAME_SIZE] - name of the app.
+ * @app_id - app id.
+ * @app_arch - app architecture (32-bit/64-bit).
+ */
+struct compat_qseecom_qseos_app_load_query {
+	char app_name[MAX_APP_NAME_SIZE]; /* in */
+	compat_uint_t app_id; /* out */
+	compat_ulong_t app_arch;
+};
+
+struct compat_qseecom_send_svc_cmd_req {
+	compat_ulong_t cmd_id;
+	compat_uptr_t cmd_req_buf; /* in */
+	compat_uint_t cmd_req_len; /* in */
+	compat_uptr_t resp_buf; /* in/out */
+	compat_uint_t resp_len; /* in/out */
+};
+
+struct compat_qseecom_create_key_req {
+	unsigned char hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+struct compat_qseecom_wipe_key_req {
+	enum qseecom_key_management_usage_type usage;
+	compat_int_t wipe_key_flag;
+};
+
+struct compat_qseecom_update_key_userinfo_req {
+	unsigned char current_hash32[QSEECOM_HASH_SIZE];
+	unsigned char new_hash32[QSEECOM_HASH_SIZE];
+	enum qseecom_key_management_usage_type usage;
+};
+
+/*
+ * struct compat_qseecom_save_partition_hash_req
+ * @partition_id - partition id.
+ * @digest[SHA256_DIGEST_LENGTH] - sha256 digest.
+ */
+struct compat_qseecom_save_partition_hash_req {
+	compat_int_t partition_id; /* in */
+	char digest[SHA256_DIGEST_LENGTH]; /* in */
+};
+
+/*
+ * struct compat_qseecom_is_es_activated_req
+ * @is_activated - 1=true, 0=false
+ */
+struct compat_qseecom_is_es_activated_req {
+	compat_int_t is_activated; /* out */
+};
+
+/*
+ * struct compat_qseecom_mdtp_cipher_dip_req
+ * @in_buf - input buffer
+ * @in_buf_size - input buffer size
+ * @out_buf - output buffer
+ * @out_buf_size - output buffer size
+ * @direction - 0=encrypt, 1=decrypt
+ */
+struct compat_qseecom_mdtp_cipher_dip_req {
+	compat_uptr_t in_buf;
+	compat_uint_t in_buf_size;
+	compat_uptr_t out_buf;
+	compat_uint_t out_buf_size;
+	compat_uint_t direction;
+};
+
+/*
+ * struct compat_qseecom_send_modfd_listener_resp - for send listener response
+ * @resp_buf_ptr - response buffer
+ * @resp_len - response buffer length
+ * @ifd_data - ion fd info (handle and command buffer offset) for memory
+ *             allocated in user space
+ */
+struct compat_qseecom_send_modfd_listener_resp {
+	compat_uptr_t resp_buf_ptr; /* in */
+	compat_uint_t resp_len; /* in */
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD]; /* in */
+};
+
+struct compat_qseecom_qteec_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+};
+
+struct compat_qseecom_qteec_modfd_req {
+	compat_uptr_t req_ptr;
+	compat_ulong_t req_len;
+	compat_uptr_t resp_ptr;
+	compat_ulong_t resp_len;
+	struct compat_qseecom_ion_fd_info ifd_data[MAX_ION_FD];
+};
+
+struct compat_qseecom_ce_pipe_entry {
+	compat_int_t valid;
+	compat_uint_t ce_num;
+	compat_uint_t ce_pipe_pair;
+};
+
+struct compat_qseecom_ce_info_req {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	compat_uint_t usage;
+	compat_uint_t unit_num;
+	compat_uint_t num_ce_pipe_entries;
+	struct compat_qseecom_ce_pipe_entry
+				ce_pipe_entry[MAX_CE_PIPE_PAIR_PER_UNIT];
+};
+
+struct file;
+extern long compat_qseecom_ioctl(struct file *file,
+					unsigned int cmd, unsigned long arg);
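+
+/*
+ * Illustrative sketch (not part of this patch): a 64-bit driver typically
+ * exposes the handler declared above through its file_operations so that
+ * 32-bit callers reach the compat path, e.g.
+ *
+ *	static const struct file_operations qseecom_fops = {
+ *		.owner          = THIS_MODULE,
+ *		.unlocked_ioctl = qseecom_ioctl,
+ *		.compat_ioctl   = compat_qseecom_ioctl,
+ *	};
+ *
+ * The "qseecom_fops" name here is an assumption for the example; only
+ * compat_qseecom_ioctl is declared by this header.
+ */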
+
+#define COMPAT_QSEECOM_IOCTL_REGISTER_LISTENER_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 1, struct compat_qseecom_register_listener_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 2)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 3, struct compat_qseecom_send_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 4, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_RECEIVE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 5)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_RESP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 6)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_APP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 7, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_SET_MEM_PARAM_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 8, struct compat_qseecom_set_sb_mem_param_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_APP_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 9)
+
+#define COMPAT_QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 10, struct compat_qseecom_qseos_version_req)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_ENABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 11)
+
+#define COMPAT_QSEECOM_IOCTL_PERF_DISABLE_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 12)
+
+#define COMPAT_QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 13, struct compat_qseecom_load_img_req)
+
+#define COMPAT_QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ \
+	_IO(QSEECOM_IOC_MAGIC, 14)
+
+#define COMPAT_QSEECOM_IOCTL_APP_LOADED_QUERY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 15, struct compat_qseecom_qseos_app_load_query)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 16, struct compat_qseecom_send_svc_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_CREATE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 17, struct compat_qseecom_create_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_WIPE_KEY_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 18, struct compat_qseecom_wipe_key_req)
+
+#define COMPAT_QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 19, \
+				struct compat_qseecom_save_partition_hash_req)
+
+#define COMPAT_QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 20, struct compat_qseecom_is_es_activated_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP \
+	_IOWR(QSEECOM_IOC_MAGIC, 21, \
+				struct compat_qseecom_send_modfd_listener_resp)
+
+#define COMPAT_QSEECOM_IOCTL_SET_BUS_SCALING_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 23, int)
+
+#define COMPAT_QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 24, \
+			struct compat_qseecom_update_key_userinfo_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 30, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 31, struct compat_qseecom_qteec_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 32, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 33, struct compat_qseecom_qteec_modfd_req)
+
+#define COMPAT_QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 34, struct compat_qseecom_mdtp_cipher_dip_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ \
+	_IOWR(QSEECOM_IOC_MAGIC, 35, struct compat_qseecom_send_modfd_cmd_req)
+
+#define COMPAT_QSEECOM_IOCTL_SEND_MODFD_RESP_64 \
+	_IOWR(QSEECOM_IOC_MAGIC, 36, \
+				struct compat_qseecom_send_modfd_listener_resp)
+#define COMPAT_QSEECOM_IOCTL_GET_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 40, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_FREE_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 41, \
+				struct compat_qseecom_ce_info_req)
+#define COMPAT_QSEECOM_IOCTL_QUERY_CE_PIPE_INFO \
+	_IOWR(QSEECOM_IOC_MAGIC, 42, \
+				struct compat_qseecom_ce_info_req)
+
+#endif /* CONFIG_COMPAT */
+#endif /* _UAPI_COMPAT_QSEECOM_H_ */
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
new file mode 100644
index 0000000..2c02d2d
--- /dev/null
+++ b/drivers/misc/qseecom.c
@@ -0,0 +1,8926 @@
+/*
+ * QTI Secure Execution Environment Communicator (QSEECOM) driver
+ *
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "QSEECOM: %s: " fmt, __func__
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/platform_device.h>
+#include <linux/debugfs.h>
+#include <linux/cdev.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/io.h>
+#include <linux/msm_ion.h>
+#include <linux/types.h>
+#include <linux/clk.h>
+#include <linux/qseecom.h>
+#include <linux/elf.h>
+#include <linux/firmware.h>
+#include <linux/freezer.h>
+#include <linux/scatterlist.h>
+#include <linux/regulator/consumer.h>
+#include <linux/dma-mapping.h>
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+#include <soc/qcom/socinfo.h>
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <soc/qcom/qseecomi.h>
+#include <asm/cacheflush.h>
+#include "qseecom_kernel.h"
+#include <crypto/ice.h>
+#include <linux/delay.h>
+
+#include <linux/compat.h>
+#include "compat_qseecom.h"
+
+#define QSEECOM_DEV			"qseecom"
+#define QSEOS_VERSION_14		0x14
+#define QSEEE_VERSION_00		0x400000
+#define QSEE_VERSION_01			0x401000
+#define QSEE_VERSION_02			0x402000
+#define QSEE_VERSION_03			0x403000
+#define QSEE_VERSION_04			0x404000
+#define QSEE_VERSION_05			0x405000
+#define QSEE_VERSION_20			0x800000
+#define QSEE_VERSION_40			0x1000000  /* TZ.BF.4.0 */
+
+#define QSEE_CE_CLK_100MHZ		100000000
+#define CE_CLK_DIV			1000000
+
+#define QSEECOM_MAX_SG_ENTRY			512
+#define QSEECOM_SG_ENTRY_MSG_BUF_SZ_64BIT	\
+			(QSEECOM_MAX_SG_ENTRY * SG_ENTRY_SZ_64BIT)
+
+#define QSEECOM_INVALID_KEY_ID  0xff
+
+/* Save partition image hash for authentication check */
+#define	SCM_SAVE_PARTITION_HASH_ID	0x01
+
+/* Check if enterprise security is activate */
+#define	SCM_IS_ACTIVATED_ID		0x02
+
+/* Encrypt/Decrypt Data Integrity Partition (DIP) for MDTP */
+#define SCM_MDTP_CIPHER_DIP		0x01
+
+/* Maximum Allowed Size (128K) of Data Integrity Partition (DIP) for MDTP */
+#define MAX_DIP			0x20000
+
+#define RPMB_SERVICE			0x2000
+#define SSD_SERVICE			0x3000
+
+#define QSEECOM_SEND_CMD_CRYPTO_TIMEOUT	2000
+#define QSEECOM_LOAD_APP_CRYPTO_TIMEOUT	2000
+#define TWO 2
+#define QSEECOM_UFS_ICE_CE_NUM 10
+#define QSEECOM_SDCC_ICE_CE_NUM 20
+#define QSEECOM_ICE_FDE_KEY_INDEX 0
+
+#define PHY_ADDR_4G	(1ULL<<32)
+
+#define QSEECOM_STATE_NOT_READY         0
+#define QSEECOM_STATE_SUSPEND           1
+#define QSEECOM_STATE_READY             2
+#define QSEECOM_ICE_FDE_KEY_SIZE_MASK   2
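+/*
+ * Note: despite the "MASK" suffix, the value above is used as a bit
+ * position (shift amount) by enum qseecom_ice_key_size_type below.
+ */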
+
+/*
+ * Default the ce info unit to 0 for services which
+ * support only a single instance.
+ * Most services are in this category.
+ */
+#define DEFAULT_CE_INFO_UNIT 0
+#define DEFAULT_NUM_CE_INFO_UNIT 1
+
+enum qseecom_clk_definitions {
+	CLK_DFAB = 0,
+	CLK_SFPB,
+};
+
+enum qseecom_ice_key_size_type {
+	QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE =
+		(0 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE =
+		(1 << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+	QSEE_ICE_FDE_KEY_SIZE_UNDEFINED =
+		(0xF << QSEECOM_ICE_FDE_KEY_SIZE_MASK),
+};
+
+enum qseecom_client_handle_type {
+	QSEECOM_CLIENT_APP = 1,
+	QSEECOM_LISTENER_SERVICE,
+	QSEECOM_SECURE_SERVICE,
+	QSEECOM_GENERIC,
+	QSEECOM_UNAVAILABLE_CLIENT_APP,
+};
+
+enum qseecom_ce_hw_instance {
+	CLK_QSEE = 0,
+	CLK_CE_DRV,
+	CLK_INVALID,
+};
+
+static struct class *driver_class;
+static dev_t qseecom_device_no;
+
+static DEFINE_MUTEX(qsee_bw_mutex);
+static DEFINE_MUTEX(app_access_lock);
+static DEFINE_MUTEX(clk_access_lock);
+
+struct sglist_info {
+	uint32_t indexAndFlags;
+	uint32_t sizeOrCount;
+};
+
+/*
+ * Bit 31 indicates whether the request buffer holds a single physical
+ * address or multiple. If it is set, the index locates a single physical
+ * address inside the request buffer, and `sizeOrCount` is the size of the
+ * memory being shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * Bit 30 indicates 64- or 32-bit addressing; when it is set, physical
+ * addresses and scatter/gather entry sizes are 64-bit values. Otherwise,
+ * they are 32-bit values.
+ *
+ * Bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i)	\
+	((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
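+
+/*
+ * Worked example (illustrative only): packing an entry that describes a
+ * single 64-bit physical address stored at offset 0x40 of the request
+ * buffer, then recovering the fields again:
+ *
+ *	uint32_t v = SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x40);
+ *	// v == 0xC0000040: bit 31 set (single phys addr),
+ *	// bit 30 set (64-bit entries), bits [0:29] == 0x40 (offset)
+ *	uint32_t single = (v >> 31) & 1;	// 1
+ *	uint32_t wide   = (v >> 30) & 1;	// 1
+ *	uint32_t index  = v & 0x3fffffff;	// 0x40
+ */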
+
+#define SGLISTINFO_TABLE_SIZE	(sizeof(struct sglist_info) * MAX_ION_FD)
+
+#define FEATURE_ID_WHITELIST	15	/* whitelist feature id */
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+	(((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
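+
+/*
+ * Worked example (illustrative only):
+ * MAKE_WHITELIST_VERSION(1, 2, 3) == (1 << 22) | (2 << 12) | 3 == 0x00402003.
+ */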
+
+struct qseecom_registered_listener_list {
+	struct list_head                 list;
+	struct qseecom_register_listener_req svc;
+	void  *user_virt_sb_base;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	size_t sb_length;
+	struct ion_handle *ihandle; /* Retrieve phy addr */
+	wait_queue_head_t          rcv_req_wq;
+	int                        rcv_req_flag;
+	int                        send_resp_flag;
+	bool                       listener_in_use;
+	/* wq for thread blocked on this listener */
+	wait_queue_head_t          listener_block_app_wq;
+	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	uint32_t sglist_cnt;
+};
+
+struct qseecom_registered_app_list {
+	struct list_head                 list;
+	u32  app_id;
+	u32  ref_cnt;
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	bool app_blocked;
+	u32  blocked_on_listener_id;
+};
+
+struct qseecom_registered_kclient_list {
+	struct list_head list;
+	struct qseecom_handle *handle;
+};
+
+struct qseecom_ce_info_use {
+	unsigned char handle[MAX_CE_INFO_HANDLE_SIZE];
+	unsigned int unit_num;
+	unsigned int num_ce_pipe_entries;
+	struct qseecom_ce_pipe_entry *ce_pipe_entry;
+	bool alloc;
+	uint32_t type;
+};
+
+struct ce_hw_usage_info {
+	uint32_t qsee_ce_hw_instance;
+	uint32_t num_fde;
+	struct qseecom_ce_info_use *fde;
+	uint32_t num_pfe;
+	struct qseecom_ce_info_use *pfe;
+};
+
+struct qseecom_clk {
+	enum qseecom_ce_hw_instance instance;
+	struct clk *ce_core_clk;
+	struct clk *ce_clk;
+	struct clk *ce_core_src_clk;
+	struct clk *ce_bus_clk;
+	uint32_t clk_access_cnt;
+};
+
+struct qseecom_control {
+	struct ion_client *ion_clnt;		/* Ion client */
+	struct list_head  registered_listener_list_head;
+	spinlock_t        registered_listener_list_lock;
+
+	struct list_head  registered_app_list_head;
+	spinlock_t        registered_app_list_lock;
+
+	struct list_head   registered_kclient_list_head;
+	spinlock_t        registered_kclient_list_lock;
+
+	wait_queue_head_t send_resp_wq;
+	int               send_resp_flag;
+
+	uint32_t          qseos_version;
+	uint32_t          qsee_version;
+	struct device *pdev;
+	bool  whitelist_support;
+	bool  commonlib_loaded;
+	bool  commonlib64_loaded;
+	struct ion_handle *cmnlib_ion_handle;
+	struct ce_hw_usage_info ce_info;
+
+	int qsee_bw_count;
+	int qsee_sfpb_bw_count;
+
+	uint32_t qsee_perf_client;
+	struct qseecom_clk qsee;
+	struct qseecom_clk ce_drv;
+
+	bool support_bus_scaling;
+	bool support_fde;
+	bool support_pfe;
+	bool fde_key_size;
+	uint32_t  cumulative_mode;
+	enum qseecom_bandwidth_request_mode  current_mode;
+	struct timer_list bw_scale_down_timer;
+	struct work_struct bw_inactive_req_ws;
+	struct cdev cdev;
+	bool timer_running;
+	bool no_clock_support;
+	unsigned int ce_opp_freq_hz;
+	bool appsbl_qseecom_support;
+	uint32_t qsee_reentrancy_support;
+
+	uint32_t app_block_ref_cnt;
+	wait_queue_head_t app_block_wq;
+	atomic_t qseecom_state;
+	int is_apps_region_protected;
+};
+
+struct qseecom_sec_buf_fd_info {
+	bool is_sec_buf_fd;
+	size_t size;
+	void *vbase;
+	dma_addr_t pbase;
+};
+
+struct qseecom_param_memref {
+	uint32_t buffer;
+	uint32_t size;
+};
+
+struct qseecom_client_handle {
+	u32  app_id;
+	u8 *sb_virt;
+	phys_addr_t sb_phys;
+	unsigned long user_virt_sb_base;
+	size_t sb_length;
+	struct ion_handle *ihandle;		/* Retrieve phy addr */
+	char app_name[MAX_APP_NAME_SIZE];
+	u32  app_arch;
+	struct qseecom_sec_buf_fd_info sec_buf_fd[MAX_ION_FD];
+};
+
+struct qseecom_listener_handle {
+	u32               id;
+};
+
+static struct qseecom_control qseecom;
+
+struct qseecom_dev_handle {
+	enum qseecom_client_handle_type type;
+	union {
+		struct qseecom_client_handle client;
+		struct qseecom_listener_handle listener;
+	};
+	bool released;
+	int               abort;
+	wait_queue_head_t abort_wq;
+	atomic_t          ioctl_count;
+	bool  perf_enabled;
+	bool  fast_load_enabled;
+	enum qseecom_bandwidth_request_mode mode;
+	struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+	uint32_t sglist_cnt;
+	bool use_legacy_cmd;
+};
+
+struct qseecom_key_id_usage_desc {
+	uint8_t desc[QSEECOM_KEY_ID_SIZE];
+};
+
+struct qseecom_crypto_info {
+	unsigned int unit_num;
+	unsigned int ce;
+	unsigned int pipe_pair;
+};
+
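+/* Entries below are indexed by enum qseecom_key_management_usage_type. */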
+static struct qseecom_key_id_usage_desc key_id_array[] = {
+	{
+		.desc = "Undefined Usage Index",
+	},
+
+	{
+		.desc = "Full Disk Encryption",
+	},
+
+	{
+		.desc = "Per File Encryption",
+	},
+
+	{
+		.desc = "UFS ICE Full Disk Encryption",
+	},
+
+	{
+		.desc = "SDCC ICE Full Disk Encryption",
+	},
+};
+
+/* Function proto types */
+static int qsee_vote_for_clock(struct qseecom_dev_handle *, int32_t);
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *, int32_t);
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce);
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce);
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce);
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name);
+static int qseecom_enable_ice_setup(int usage);
+static int qseecom_disable_ice_setup(int usage);
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id);
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+						void __user *argp);
+
+static int get_qseecom_keymaster_status(char *str)
+{
+	get_option(&str, &qseecom.is_apps_region_protected);
+	return 1;
+}
+__setup("androidboot.keymaster=", get_qseecom_keymaster_status);
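+
+/*
+ * Example (illustrative): booting with "androidboot.keymaster=1" on the
+ * kernel command line makes get_option() parse the value 1 into
+ * qseecom.is_apps_region_protected via the __setup() hook above.
+ */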
+
+static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
+			const void *req_buf, void *resp_buf)
+{
+	int      ret = 0;
+	uint32_t smc_id = 0;
+	uint32_t qseos_cmd_id = 0;
+	struct scm_desc desc = {0};
+	struct qseecom_command_scm_resp *scm_resp = NULL;
+
+	if (!req_buf || !resp_buf) {
+		pr_err("Invalid buffer pointer\n");
+		return -EINVAL;
+	}
+	qseos_cmd_id = *(uint32_t *)req_buf;
+	scm_resp = (struct qseecom_command_scm_resp *)resp_buf;
+
+	switch (svc_id) {
+	case 6: {
+		if (tz_cmd_id == 3) {
+			smc_id = TZ_INFO_GET_FEATURE_VERSION_ID;
+			desc.arginfo = TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID;
+			desc.args[0] = *(uint32_t *)req_buf;
+		} else {
+			pr_err("Unsupported svc_id %d, tz_cmd_id %d\n",
+				svc_id, tz_cmd_id);
+			return -EINVAL;
+		}
+		ret = scm_call2(smc_id, &desc);
+		break;
+	}
+	case SCM_SVC_ES: {
+		switch (tz_cmd_id) {
+		case SCM_SAVE_PARTITION_HASH_ID: {
+			u32 tzbuflen = PAGE_ALIGN(SHA256_DIGEST_LENGTH);
+			struct qseecom_save_partition_hash_req *p_hash_req =
+				(struct qseecom_save_partition_hash_req *)
+				req_buf;
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, p_hash_req->digest,
+				SHA256_DIGEST_LENGTH);
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_ES_SAVE_PARTITION_HASH_ID;
+			desc.arginfo = TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID;
+			desc.args[0] = p_hash_req->partition_id;
+			desc.args[1] = virt_to_phys(tzbuf);
+			desc.args[2] = SHA256_DIGEST_LENGTH;
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		default: {
+			pr_err("tz_cmd_id %d is not supported by scm_call2\n",
+						tz_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (tz_cmd_id) */
+		break;
+	} /* end of case SCM_SVC_ES */
+	case SCM_SVC_TZSCHEDULER: {
+		switch (qseos_cmd_id) {
+		case QSEOS_APP_START_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_START_ID;
+			desc.arginfo = TZ_OS_APP_START_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+					(struct qseecom_load_app_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_SHUTDOWN_COMMAND: {
+			struct qseecom_unload_app_ireq *req;
+
+			req = (struct qseecom_unload_app_ireq *)req_buf;
+			smc_id = TZ_OS_APP_SHUTDOWN_ID;
+			desc.arginfo = TZ_OS_APP_SHUTDOWN_ID_PARAM_ID;
+			desc.args[0] = req->app_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_APP_LOOKUP_COMMAND: {
+			struct qseecom_check_app_ireq *req;
+			u32 tzbuflen = PAGE_ALIGN(sizeof(req->app_name));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			req = (struct qseecom_check_app_ireq *)req_buf;
+			pr_debug("Lookup app_name = %s\n", req->app_name);
+			strlcpy(tzbuf, req->app_name, sizeof(req->app_name));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_APP_LOOKUP_ID;
+			desc.arginfo = TZ_OS_APP_LOOKUP_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = strlen(req->app_name);
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_APP_REGION_NOTIFICATION: {
+			struct qsee_apps_region_info_ireq *req;
+			struct qsee_apps_region_info_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_APP_REGION_NOTIFICATION_ID;
+			desc.arginfo =
+				TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qsee_apps_region_info_ireq *)
+					req_buf;
+				desc.args[0] = req->addr;
+				desc.args[1] = req->size;
+			} else {
+				req_64bit =
+				(struct qsee_apps_region_info_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->addr;
+				desc.args[1] = req_64bit->size;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_SERV_IMAGE_COMMAND: {
+			struct qseecom_load_lib_image_ireq *req;
+			struct qseecom_load_lib_image_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_lib_image_ireq *)
+					req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_lib_image_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_SERV_IMAGE_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_SERVICES_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_REGISTER_LISTENER: {
+			struct qseecom_register_listener_ireq *req;
+			struct qseecom_register_listener_64bit_ireq *req_64bit;
+
+			desc.arginfo =
+				TZ_OS_REGISTER_LISTENER_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_register_listener_ireq *)
+					req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->sb_ptr;
+				desc.args[2] = req->sb_len;
+			} else {
+				req_64bit =
+				(struct qseecom_register_listener_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->listener_id;
+				desc.args[1] = req_64bit->sb_ptr;
+				desc.args[2] = req_64bit->sb_len;
+			}
+			smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			if (ret) {
+				smc_id = TZ_OS_REGISTER_LISTENER_ID;
+				__qseecom_reentrancy_check_if_no_app_blocked(
+					smc_id);
+				ret = scm_call2(smc_id, &desc);
+			}
+			break;
+		}
+		case QSEOS_DEREGISTER_LISTENER: {
+			struct qseecom_unregister_listener_ireq *req;
+
+			req = (struct qseecom_unregister_listener_ireq *)
+				req_buf;
+			smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
+			desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND: {
+			struct qseecom_client_listener_data_irsp *req;
+
+			req = (struct qseecom_client_listener_data_irsp *)
+				req_buf;
+			smc_id = TZ_OS_LISTENER_RESPONSE_HANDLER_ID;
+			desc.arginfo =
+				TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID;
+			desc.args[0] = req->listener_id;
+			desc.args[1] = req->status;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+			struct qseecom_client_listener_data_irsp *req;
+			struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+			smc_id =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req =
+				(struct qseecom_client_listener_data_irsp *)
+				req_buf;
+				desc.args[0] = req->listener_id;
+				desc.args[1] = req->status;
+				desc.args[2] = req->sglistinfo_ptr;
+				desc.args[3] = req->sglistinfo_len;
+			} else {
+				req_64 =
+			(struct qseecom_client_listener_data_64bit_irsp *)
+				req_buf;
+				desc.args[0] = req_64->listener_id;
+				desc.args[1] = req_64->status;
+				desc.args[2] = req_64->sglistinfo_ptr;
+				desc.args[3] = req_64->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
+			struct qseecom_load_app_ireq *req;
+			struct qseecom_load_app_64bit_ireq *req_64bit;
+
+			smc_id = TZ_OS_LOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_load_app_ireq *)req_buf;
+				desc.args[0] = req->mdt_len;
+				desc.args[1] = req->img_len;
+				desc.args[2] = req->phy_addr;
+			} else {
+				req_64bit =
+				(struct qseecom_load_app_64bit_ireq *)req_buf;
+				desc.args[0] = req_64bit->mdt_len;
+				desc.args[1] = req_64bit->img_len;
+				desc.args[2] = req_64bit->phy_addr;
+			}
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND: {
+			smc_id = TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID;
+			desc.arginfo = TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+			}
+
+		case QSEOS_CLIENT_SEND_DATA_COMMAND: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_ID;
+			desc.arginfo = TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST: {
+			struct qseecom_client_send_data_ireq *req;
+			struct qseecom_client_send_data_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_client_send_data_ireq *)
+					req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->rsp_ptr;
+				desc.args[4] = req->rsp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit =
+				(struct qseecom_client_send_data_64bit_ireq *)
+					req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->rsp_ptr;
+				desc.args[4] = req_64bit->rsp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_PROVISION_KEY_COMMAND: {
+			struct qseecom_client_send_service_ireq *req;
+
+			req = (struct qseecom_client_send_service_ireq *)
+				req_buf;
+			smc_id = TZ_OS_RPMB_PROVISION_KEY_ID;
+			desc.arginfo = TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID;
+			desc.args[0] = req->key_type;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_ERASE_COMMAND: {
+			smc_id = TZ_OS_RPMB_ERASE_ID;
+			desc.arginfo = TZ_OS_RPMB_ERASE_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND: {
+			smc_id = TZ_OS_RPMB_CHECK_PROV_STATUS_ID;
+			desc.arginfo = TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_GENERATE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_generate_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_GEN_KEY_ID;
+			desc.arginfo = TZ_OS_KS_GEN_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_DELETE_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_delete_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_DEL_KEY_ID;
+			desc.arginfo = TZ_OS_KS_DEL_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_SET_KEY: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t),
+				(sizeof(struct qseecom_key_select_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_SET_PIPE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_UPDATE_KEY_USERINFO: {
+			u32 tzbuflen = PAGE_ALIGN(sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t));
+			char *tzbuf = kzalloc(tzbuflen, GFP_KERNEL);
+
+			if (!tzbuf)
+				return -ENOMEM;
+			memset(tzbuf, 0, tzbuflen);
+			memcpy(tzbuf, req_buf + sizeof(uint32_t), (sizeof
+				(struct qseecom_key_userinfo_update_ireq) -
+				sizeof(uint32_t)));
+			dmac_flush_range(tzbuf, tzbuf + tzbuflen);
+			smc_id = TZ_OS_KS_UPDATE_KEY_ID;
+			desc.arginfo = TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID;
+			desc.args[0] = virt_to_phys(tzbuf);
+			desc.args[1] = tzbuflen;
+			__qseecom_reentrancy_check_if_no_app_blocked(smc_id);
+			ret = scm_call2(smc_id, &desc);
+			kzfree(tzbuf);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_OPEN_SESSION_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_ID;
+			desc.arginfo = TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_INVOKE_COMMAND_WHITELIST: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID;
+			desc.arginfo =
+			TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+				desc.args[5] = req->sglistinfo_ptr;
+				desc.args[6] = req->sglistinfo_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+				desc.args[5] = req_64bit->sglistinfo_ptr;
+				desc.args[6] = req_64bit->sglistinfo_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_CLOSE_SESSION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_CLOSE_SESSION_ID;
+			desc.arginfo = TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_TEE_REQUEST_CANCELLATION: {
+			struct qseecom_qteec_ireq *req;
+			struct qseecom_qteec_64bit_ireq *req_64bit;
+
+			smc_id = TZ_APP_GPAPP_REQUEST_CANCELLATION_ID;
+			desc.arginfo =
+				TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID;
+			if (qseecom.qsee_version < QSEE_VERSION_40) {
+				req = (struct qseecom_qteec_ireq *)req_buf;
+				desc.args[0] = req->app_id;
+				desc.args[1] = req->req_ptr;
+				desc.args[2] = req->req_len;
+				desc.args[3] = req->resp_ptr;
+				desc.args[4] = req->resp_len;
+			} else {
+				req_64bit = (struct qseecom_qteec_64bit_ireq *)
+						req_buf;
+				desc.args[0] = req_64bit->app_id;
+				desc.args[1] = req_64bit->req_ptr;
+				desc.args[2] = req_64bit->req_len;
+				desc.args[3] = req_64bit->resp_ptr;
+				desc.args[4] = req_64bit->resp_len;
+			}
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		case QSEOS_CONTINUE_BLOCKED_REQ_COMMAND: {
+			struct qseecom_continue_blocked_request_ireq *req =
+				(struct qseecom_continue_blocked_request_ireq *)
+				req_buf;
+			smc_id = TZ_OS_CONTINUE_BLOCKED_REQUEST_ID;
+			desc.arginfo =
+				TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID;
+			desc.args[0] = req->app_id;
+			ret = scm_call2(smc_id, &desc);
+			break;
+		}
+		default: {
+			pr_err("qseos_cmd_id %d is not supported by armv8 scm_call2.\n",
+						qseos_cmd_id);
+			ret = -EINVAL;
+			break;
+		}
+		} /* end of switch (qseos_cmd_id) */
+	break;
+	} /* end of case SCM_SVC_TZSCHEDULER */
+	default: {
+		pr_err("svc_id 0x%x is not supported by armv8 scm_call2.\n",
+					svc_id);
+		ret = -EINVAL;
+		break;
+	}
+	} /* end of switch (svc_id) */
+	scm_resp->result = desc.ret[0];
+	scm_resp->resp_type = desc.ret[1];
+	scm_resp->data = desc.ret[2];
+	pr_debug("svc_id = 0x%x, tz_cmd_id = 0x%x, qseos_cmd_id = 0x%x, smc_id = 0x%x, param_id = 0x%x\n",
+		svc_id, tz_cmd_id, qseos_cmd_id, smc_id, desc.arginfo);
+	pr_debug("scm_resp->result = 0x%x, scm_resp->resp_type = 0x%x, scm_resp->data = 0x%x\n",
+		scm_resp->result, scm_resp->resp_type, scm_resp->data);
+	return ret;
+}
+
+
+static int qseecom_scm_call(u32 svc_id, u32 tz_cmd_id, const void *cmd_buf,
+		size_t cmd_len, void *resp_buf, size_t resp_len)
+{
+	if (!is_scm_armv8())
+		return scm_call(svc_id, tz_cmd_id, cmd_buf, cmd_len,
+				resp_buf, resp_len);
+	else
+		return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
+}
+
+static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
+		struct qseecom_register_listener_req *svc)
+{
+	struct qseecom_registered_listener_list *ptr;
+	int unique = 1;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
+		if (ptr->svc.listener_id == svc->listener_id) {
+			pr_err("Service id: %u is already registered\n",
+					ptr->svc.listener_id);
+			unique = 0;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+	return unique;
+}
+
+static struct qseecom_registered_listener_list *__qseecom_find_svc(
+						int32_t listener_id)
+{
+	struct qseecom_registered_listener_list *entry = NULL;
+	unsigned long flags;
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_listener_list_head, list) {
+		if (entry->svc.listener_id == listener_id)
+			break;
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
+		pr_err("Service id: %u is not found\n", listener_id);
+		return NULL;
+	}
+
+	return entry;
+}
+
+static int __qseecom_set_sb_memory(struct qseecom_registered_listener_list *svc,
+				struct qseecom_dev_handle *handle,
+				struct qseecom_register_listener_req *listener)
+{
+	int ret = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_register_listener_64bit_ireq req_64bit;
+	struct qseecom_command_scm_resp resp;
+	ion_phys_addr_t pa;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+
+	/* Get the handle of the shared fd */
+	svc->ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					listener->ifd_data_fd);
+	if (IS_ERR_OR_NULL(svc->ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, svc->ihandle, &pa, &svc->sb_length);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		return ret;
+	}
+	/* Populate the structure for sending scm call to load image */
+	svc->sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt, svc->ihandle);
+	if (IS_ERR_OR_NULL(svc->sb_virt)) {
+		pr_err("ION memory mapping for listener shared buffer failed\n");
+		return -ENOMEM;
+	}
+	svc->sb_phys = (phys_addr_t)pa;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		req.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req.listener_id = svc->svc.listener_id;
+		req.sb_len = svc->sb_length;
+		req.sb_ptr = (uint32_t)svc->sb_phys;
+		cmd_buf = (void *)&req;
+		cmd_len = sizeof(struct qseecom_register_listener_ireq);
+	} else {
+		req_64bit.qsee_cmd_id = QSEOS_REGISTER_LISTENER;
+		req_64bit.listener_id = svc->svc.listener_id;
+		req_64bit.sb_len = svc->sb_length;
+		req_64bit.sb_ptr = (uint64_t)svc->sb_phys;
+		cmd_buf = (void *)&req_64bit;
+		cmd_len = sizeof(struct qseecom_register_listener_64bit_ireq);
+	}
+
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+					 &resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		return -EINVAL;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Error SB registration req: resp.result = %d\n",
+			resp.result);
+		return -EPERM;
+	}
+	return 0;
+}
+
+static int qseecom_register_listener(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int ret = 0;
+	unsigned long flags;
+	struct qseecom_register_listener_req rcvd_lstnr;
+	struct qseecom_registered_listener_list *new_entry;
+
+	ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)rcvd_lstnr.virt_sb_base,
+			rcvd_lstnr.sb_size))
+		return -EFAULT;
+
+	data->listener.id = 0;
+	if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
+		pr_err("Service is not unique and is already registered\n");
+		data->released = true;
+		return -EBUSY;
+	}
+
+	new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
+	if (!new_entry)
+		return -ENOMEM;
+	memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
+	new_entry->rcv_req_flag = 0;
+
+	new_entry->svc.listener_id = rcvd_lstnr.listener_id;
+	new_entry->sb_length = rcvd_lstnr.sb_size;
+	new_entry->user_virt_sb_base = rcvd_lstnr.virt_sb_base;
+	if (__qseecom_set_sb_memory(new_entry, data, &rcvd_lstnr)) {
+		pr_err("__qseecom_set_sb_memory failed\n");
+		kzfree(new_entry);
+		return -ENOMEM;
+	}
+
+	data->listener.id = rcvd_lstnr.listener_id;
+	init_waitqueue_head(&new_entry->rcv_req_wq);
+	init_waitqueue_head(&new_entry->listener_block_app_wq);
+	new_entry->send_resp_flag = 0;
+	new_entry->listener_in_use = false;
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	return ret;
+}
+
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	unsigned long flags;
+	uint32_t unmap_mem = 0;
+	struct qseecom_register_listener_ireq req;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	struct qseecom_command_scm_resp resp;
+	struct ion_handle *ihandle = NULL;		/* Retrieve phy addr */
+
+	req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
+	req.listener_id = data->listener.id;
+	resp.result = QSEOS_RESULT_INCOMPLETE;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+					sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
+				ret, data->listener.id);
+		return ret;
+	}
+
+	if (resp.result != QSEOS_RESULT_SUCCESS) {
+		pr_err("Failed resp.result=%d,(lstnr id=%d)\n",
+				resp.result, data->listener.id);
+		return -EPERM;
+	}
+
+	data->abort = 1;
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc, &qseecom.registered_listener_list_head,
+			list) {
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			wake_up_all(&ptr_svc->rcv_req_wq);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+				atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+
+	spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
+	list_for_each_entry(ptr_svc,
+			&qseecom.registered_listener_list_head, list) {
+		if (ptr_svc->svc.listener_id == data->listener.id) {
+			if (ptr_svc->sb_virt) {
+				unmap_mem = 1;
+				ihandle = ptr_svc->ihandle;
+			}
+			list_del(&ptr_svc->list);
+			kzfree(ptr_svc);
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
+
+	/* Unmap the memory */
+	if (unmap_mem) {
+		if (!IS_ERR_OR_NULL(ihandle)) {
+			ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+			ion_free(qseecom.ion_clnt, ihandle);
+		}
+	}
+	data->released = true;
+	return ret;
+}
+
+static int __qseecom_set_msm_bus_request(uint32_t mode)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qclk->ce_core_src_clk != NULL) {
+		if (mode == INACTIVE) {
+			__qseecom_disable_clk(CLK_QSEE);
+		} else {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				pr_err("CLK enabling failed (%d) MODE (%d)\n",
+							ret, mode);
+		}
+	}
+
+	if ((!ret) && (qseecom.current_mode != mode)) {
+		ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, mode);
+		if (ret) {
+			pr_err("Bandwidth req failed(%d) MODE (%d)\n",
+							ret, mode);
+			if (qclk->ce_core_src_clk != NULL) {
+				if (mode == INACTIVE) {
+					ret = __qseecom_enable_clk(CLK_QSEE);
+					if (ret)
+						pr_err("CLK enable failed\n");
+				} else
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+		}
+		qseecom.current_mode = mode;
+	}
+	return ret;
+}
+
+static void qseecom_bw_inactive_req_work(struct work_struct *work)
+{
+	mutex_lock(&app_access_lock);
+	mutex_lock(&qsee_bw_mutex);
+	if (qseecom.timer_running)
+		__qseecom_set_msm_bus_request(INACTIVE);
+	pr_debug("current_mode = %d, cumulative_mode = %d\n",
+				qseecom.current_mode, qseecom.cumulative_mode);
+	qseecom.timer_running = false;
+	mutex_unlock(&qsee_bw_mutex);
+	mutex_unlock(&app_access_lock);
+}
+
+static void qseecom_scale_bus_bandwidth_timer_callback(unsigned long data)
+{
+	schedule_work(&qseecom.bw_inactive_req_ws);
+}
+
+static int __qseecom_decrease_clk_ref_count(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+	int ret = 0;
+
+	mutex_lock(&clk_access_lock);
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->clk_access_cnt > 2) {
+		pr_err("Invalid clock ref count %d\n", qclk->clk_access_cnt);
+		ret = -EINVAL;
+		goto err_dec_ref_cnt;
+	}
+	if (qclk->clk_access_cnt == 2)
+		qclk->clk_access_cnt--;
+
+err_dec_ref_cnt:
+	mutex_unlock(&clk_access_lock);
+	return ret;
+}
+
+
+static int qseecom_scale_bus_bandwidth_timer(uint32_t mode)
+{
+	int32_t ret = 0;
+	int32_t request_mode = INACTIVE;
+
+	mutex_lock(&qsee_bw_mutex);
+	if (mode == 0) {
+		if (qseecom.cumulative_mode > MEDIUM)
+			request_mode = HIGH;
+		else
+			request_mode = qseecom.cumulative_mode;
+	} else {
+		request_mode = mode;
+	}
+
+	ret = __qseecom_set_msm_bus_request(request_mode);
+	if (ret) {
+		pr_err("set msm bus request failed (%d),request_mode (%d)\n",
+			ret, request_mode);
+		goto err_scale_timer;
+	}
+
+	if (qseecom.timer_running) {
+		ret = __qseecom_decrease_clk_ref_count(CLK_QSEE);
+		if (ret) {
+			pr_err("Failed to decrease clk ref count.\n");
+			goto err_scale_timer;
+		}
+		del_timer_sync(&(qseecom.bw_scale_down_timer));
+		qseecom.timer_running = false;
+	}
+err_scale_timer:
+	mutex_unlock(&qsee_bw_mutex);
+	return ret;
+}
+
+
+static int qseecom_unregister_bus_bandwidth_needs(
+					struct qseecom_dev_handle *data)
+{
+	int32_t ret = 0;
+
+	qseecom.cumulative_mode -= data->mode;
+	data->mode = INACTIVE;
+
+	return ret;
+}
+
+static int __qseecom_register_bus_bandwidth_needs(
+			struct qseecom_dev_handle *data, uint32_t request_mode)
+{
+	int32_t ret = 0;
+
+	if (data->mode == INACTIVE) {
+		qseecom.cumulative_mode += request_mode;
+		data->mode = request_mode;
+	} else {
+		if (data->mode != request_mode) {
+			qseecom.cumulative_mode -= data->mode;
+			qseecom.cumulative_mode += request_mode;
+			data->mode = request_mode;
+		}
+	}
+	return ret;
+}
+
+static int qseecom_perf_enable(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	ret = qsee_vote_for_clock(data, CLK_DFAB);
+	if (ret) {
+		pr_err("Failed to vote for DFAB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+	ret = qsee_vote_for_clock(data, CLK_SFPB);
+	if (ret) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		pr_err("Failed to vote for SFPB clock with err %d\n", ret);
+		goto perf_enable_exit;
+	}
+
+perf_enable_exit:
+	return ret;
+}
+
+static int qseecom_scale_bus_bandwidth(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	int32_t ret = 0;
+	int32_t req_mode;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	ret = copy_from_user(&req_mode, argp, sizeof(req_mode));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+	if (req_mode > HIGH) {
+		pr_err("Invalid bandwidth mode (%d)\n", req_mode);
+		return -EINVAL;
+	}
+
+	/*
+	 * Register bus bandwidth needs if bus scaling feature is enabled;
+	 * otherwise, qseecom enable/disable clocks for the client directly.
+	 */
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, req_mode);
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		pr_debug("Bus scaling feature is NOT enabled\n");
+		pr_debug("request bandwidth mode %d for the client\n",
+				req_mode);
+		if (req_mode != INACTIVE) {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		} else {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+	}
+	return ret;
+}
+
+static void __qseecom_add_bw_scale_down_timer(uint32_t duration)
+{
+	if (qseecom.no_clock_support)
+		return;
+
+	mutex_lock(&qsee_bw_mutex);
+	qseecom.bw_scale_down_timer.expires = jiffies +
+		msecs_to_jiffies(duration);
+	mod_timer(&(qseecom.bw_scale_down_timer),
+		qseecom.bw_scale_down_timer.expires);
+	qseecom.timer_running = true;
+	mutex_unlock(&qsee_bw_mutex);
+}
+
+static void __qseecom_disable_clk_scale_down(struct qseecom_dev_handle *data)
+{
+	if (!qseecom.support_bus_scaling)
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	else
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_LOAD_APP_CRYPTO_TIMEOUT);
+}
+
+static int __qseecom_enable_clk_scale_up(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(MEDIUM);
+		if (ret)
+			pr_err("Failed to set bw MEDIUM.\n");
+	} else {
+		ret = qsee_vote_for_clock(data, CLK_SFPB);
+		if (ret)
+			pr_err("Fail vote for clk SFPB ret %d\n", ret);
+	}
+	return ret;
+}
+
+static int qseecom_set_client_mem_param(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	ion_phys_addr_t pa;
+	int32_t ret;
+	struct qseecom_set_sb_mem_param_req req;
+	size_t len;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&req, (void __user *)argp, sizeof(req)))
+		return -EFAULT;
+
+	if ((req.ifd_data_fd <= 0) || (req.virt_sb_base == NULL) ||
+					(req.sb_len == 0)) {
+		pr_err("Invalid input(s): ion_fd(%d), sb_len(%d), vaddr(0x%pK)\n",
+			req.ifd_data_fd, req.sb_len, req.virt_sb_base);
+		return -EFAULT;
+	}
+	if (!access_ok(VERIFY_WRITE, (void __user *)req.virt_sb_base,
+			req.sb_len))
+		return -EFAULT;
+
+	/* Get the handle of the shared fd */
+	data->client.ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+						req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(data->client.ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		return ret;
+	}
+
+	if (len < req.sb_len) {
+		pr_err("Requested length (0x%x) is > allocated (%zu)\n",
+			req.sb_len, len);
+		return -EINVAL;
+	}
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							data->client.ihandle);
+	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+		pr_err("ION memory mapping for client shared buf failed\n");
+		return -ENOMEM;
+	}
+	data->client.sb_phys = (phys_addr_t)pa;
+	data->client.sb_length = req.sb_len;
+	data->client.user_virt_sb_base = (uintptr_t)req.virt_sb_base;
+	return 0;
+}
+
+static int __qseecom_listener_has_sent_rsp(struct qseecom_dev_handle *data)
+{
+	int ret;
+
+	ret = (qseecom.send_resp_flag != 0);
+	return ret || data->abort;
+}
+
+static int __qseecom_reentrancy_listener_has_sent_rsp(
+			struct qseecom_dev_handle *data,
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	int ret;
+
+	ret = (ptr_svc->send_resp_flag != 0);
+	return ret || data->abort;
+}
+
+static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp,
+			struct qseecom_client_listener_data_irsp *send_data_rsp,
+			struct qseecom_registered_listener_list *ptr_svc,
+							uint32_t lstnr)
+{
+	int ret = 0;
+
+	send_data_rsp->status = QSEOS_RESULT_FAILURE;
+	qseecom.send_resp_flag = 0;
+	send_data_rsp->qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
+	send_data_rsp->listener_id = lstnr;
+	if (ptr_svc)
+		pr_warn("listener_id:%x, lstnr: %x\n",
+					ptr_svc->svc.listener_id, lstnr);
+	if (ptr_svc && ptr_svc->ihandle) {
+		ret = msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			return ret;
+		}
+	}
+
+	if (lstnr == RPMB_SERVICE) {
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			return ret;
+	}
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, send_data_rsp,
+				sizeof(*send_data_rsp), resp, sizeof(*resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+						ret, data->client.app_id);
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+		return ret;
+	}
+	if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+		pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+		ret = -EINVAL;
+	}
+	if (lstnr == RPMB_SERVICE)
+		__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static void __qseecom_clean_listener_sglistinfo(
+			struct qseecom_registered_listener_list *ptr_svc)
+{
+	if (ptr_svc->sglist_cnt) {
+		memset(ptr_svc->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		ptr_svc->sglist_cnt = 0;
+	}
+}
+
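+/*
+ * Drive a TZ request to completion. While TZ reports QSEOS_RESULT_INCOMPLETE,
+ * wake the listener service named in resp->data, wait (with all signals
+ * blocked) for it to post its response, then return that response to TZ via
+ * an scm call and re-evaluate the new result.
+ */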
+static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	unsigned long flags;
+	struct qseecom_client_listener_data_irsp send_data_rsp;
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocked listener service with the lstnr id
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -EINVAL;
+		}
+
+		if (!ptr_svc->ihandle) {
+			pr_err("Client handle is not initialized\n");
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -EINVAL;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_warn("Service requested does not exist\n");
+			__qseecom_qseos_fail_return_resp_tz(data, resp,
+					&send_data_rsp, ptr_svc, lstnr);
+			return -ERESTARTSYS;
+		}
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals */
+		sigfillset(&new_sigset);
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		do {
+			/*
+			 * When reentrancy is not supported, check global
+			 * send_resp_flag; otherwise, check this listener's
+			 * send_resp_flag.
+			 */
+			if (!qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_listener_has_sent_rsp(data))) {
+				break;
+			}
+
+			if (qseecom.qsee_reentrancy_support &&
+				!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status = QSEOS_RESULT_FAILURE;
+		} else {
+			status = QSEOS_RESULT_SUCCESS;
+		}
+
+		qseecom.send_resp_flag = 0;
+		ptr_svc->send_resp_flag = 0;
+		table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			send_data_rsp.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+			send_data_rsp.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			send_data_rsp_64bit.sglistinfo_ptr =
+				virt_to_phys(table);
+			send_data_rsp_64bit.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (qseecom.whitelist_support == false)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+		if (ptr_svc) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				return ret;
+			}
+		}
+
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				return ret;
+		}
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+		ptr_svc->listener_in_use = false;
+		__qseecom_clean_listener_sglistinfo(ptr_svc);
+		if (ret) {
+			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+				ret, data->client.app_id);
+			if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+				__qseecom_disable_clk(CLK_QSEE);
+			return ret;
+		}
+		if ((resp->result != QSEOS_RESULT_SUCCESS) &&
+			(resp->result != QSEOS_RESULT_INCOMPLETE)) {
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+		}
+		if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
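+/*
+ * Called when a TZ app reports QSEOS_RESULT_BLOCKED_ON_LISTENER: sleep
+ * (dropping app_access_lock) until the listener named in resp->data is
+ * free, then ask TZ to resume the blocked app and mark the request
+ * INCOMPLETE so the caller continues normal incomplete-cmd processing.
+ */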
+int __qseecom_process_reentrancy_blocked_on_listener(
+				struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *list_ptr;
+	int ret = 0;
+	struct qseecom_continue_blocked_request_ireq ireq;
+	struct qseecom_command_scm_resp continue_resp;
+	sigset_t new_sigset, old_sigset;
+	unsigned long flags;
+	bool found_app = false;
+
+	if (!resp || !data) {
+		pr_err("invalid resp or data pointer\n");
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	/* find app_id & img_name from list */
+	if (!ptr_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+			if ((ptr_app->app_id == data->client.app_id) &&
+				(!strcmp(ptr_app->app_name,
+						data->client.app_name))) {
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+					flags);
+		if (!found_app) {
+			pr_err("app_id %d (%s) is not found\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -ENOENT;
+			goto exit;
+		}
+	}
+
+	list_ptr = __qseecom_find_svc(resp->data);
+	if (!list_ptr) {
+		pr_err("Invalid listener ID\n");
+		ret = -ENODATA;
+		goto exit;
+	}
+	pr_debug("lsntr %d in_use = %d\n",
+			resp->data, list_ptr->listener_in_use);
+	ptr_app->blocked_on_listener_id = resp->data;
+	/* sleep until listener is available */
+	do {
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		sigfillset(&new_sigset);
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+		mutex_unlock(&app_access_lock);
+		do {
+			if (!wait_event_freezable(
+				list_ptr->listener_block_app_wq,
+				!list_ptr->listener_in_use)) {
+				break;
+			}
+		} while (1);
+		mutex_lock(&app_access_lock);
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+	} while (list_ptr->listener_in_use == true);
+	ptr_app->blocked_on_listener_id = 0;
+	/* notify the blocked app that listener is available */
+	pr_warn("Lsntr %d is available, unblock app(%d) %s in TZ\n",
+		resp->data, data->client.app_id,
+		data->client.app_name);
+	ireq.qsee_cmd_id = QSEOS_CONTINUE_BLOCKED_REQ_COMMAND;
+	ireq.app_id = data->client.app_id;
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+			&ireq, sizeof(ireq),
+			&continue_resp, sizeof(continue_resp));
+	if (ret) {
+		pr_err("scm_call for continue blocked req for app(%d) %s failed, ret %d\n",
+			data->client.app_id,
+			data->client.app_name, ret);
+		goto exit;
+	}
+	/*
+	 * After TZ app is unblocked, then continue to next case
+	 * for incomplete request processing
+	 */
+	resp->result = QSEOS_RESULT_INCOMPLETE;
+exit:
+	return ret;
+}
+
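+/*
+ * Reentrant variant of __qseecom_process_incomplete_cmd: app_access_lock
+ * is released while waiting for the listener response so other TAs can
+ * make progress, and the listener response itself may report that the app
+ * is now blocked on another listener.
+ */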
+static int __qseecom_reentrancy_process_incomplete_cmd(
+					struct qseecom_dev_handle *data,
+					struct qseecom_command_scm_resp *resp)
+{
+	int ret = 0;
+	int rc = 0;
+	uint32_t lstnr;
+	unsigned long flags;
+	struct qseecom_client_listener_data_irsp send_data_rsp;
+	struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
+	struct qseecom_registered_listener_list *ptr_svc = NULL;
+	sigset_t new_sigset;
+	sigset_t old_sigset;
+	uint32_t status;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = NULL;
+
+	while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
+		lstnr = resp->data;
+		/*
+		 * Wake up the blocked listener service with the lstnr id
+		 */
+		spin_lock_irqsave(&qseecom.registered_listener_list_lock,
+					flags);
+		list_for_each_entry(ptr_svc,
+				&qseecom.registered_listener_list_head, list) {
+			if (ptr_svc->svc.listener_id == lstnr) {
+				ptr_svc->listener_in_use = true;
+				ptr_svc->rcv_req_flag = 1;
+				wake_up_interruptible(&ptr_svc->rcv_req_wq);
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
+				flags);
+
+		if (ptr_svc == NULL) {
+			pr_err("Listener Svc %d does not exist\n", lstnr);
+			return -EINVAL;
+		}
+
+		if (!ptr_svc->ihandle) {
+			pr_err("Client handle is not initialized\n");
+			return -EINVAL;
+		}
+
+		if (ptr_svc->svc.listener_id != lstnr) {
+			pr_warn("Service requested does not exist\n");
+			return -ERESTARTSYS;
+		}
+		pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
+
+		/* initialize the new signal mask with all signals */
+		sigfillset(&new_sigset);
+
+		/* block all signals */
+		sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+
+		/* unlock mutex between waking listener and sleep-wait */
+		mutex_unlock(&app_access_lock);
+		do {
+			if (!wait_event_freezable(qseecom.send_resp_wq,
+				__qseecom_reentrancy_listener_has_sent_rsp(
+						data, ptr_svc))) {
+				break;
+			}
+		} while (1);
+		/* lock mutex again after resp sent */
+		mutex_lock(&app_access_lock);
+		ptr_svc->send_resp_flag = 0;
+		qseecom.send_resp_flag = 0;
+
+		/* restore signal mask */
+		sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		if (data->abort) {
+			pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
+				data->client.app_id, lstnr, ret);
+			rc = -ENODEV;
+			status  = QSEOS_RESULT_FAILURE;
+		} else {
+			status  = QSEOS_RESULT_SUCCESS;
+		}
+		table = ptr_svc->sglistinfo_ptr;
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			send_data_rsp.listener_id  = lstnr;
+			send_data_rsp.status = status;
+			send_data_rsp.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+			send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp;
+			cmd_len = sizeof(send_data_rsp);
+		} else {
+			send_data_rsp_64bit.listener_id  = lstnr;
+			send_data_rsp_64bit.status = status;
+			send_data_rsp_64bit.sglistinfo_ptr =
+				virt_to_phys(table);
+			send_data_rsp_64bit.sglistinfo_len =
+				SGLISTINFO_TABLE_SIZE;
+			dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+			cmd_buf = (void *)&send_data_rsp_64bit;
+			cmd_len = sizeof(send_data_rsp_64bit);
+		}
+		if (qseecom.whitelist_support == false)
+			*(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+		else
+			*(uint32_t *)cmd_buf =
+				QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
+		if (ptr_svc) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ptr_svc->ihandle,
+					ptr_svc->sb_virt, ptr_svc->sb_length,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				return ret;
+			}
+		}
+		if (lstnr == RPMB_SERVICE) {
+			ret = __qseecom_enable_clk(CLK_QSEE);
+			if (ret)
+				return ret;
+		}
+
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len, resp, sizeof(*resp));
+		ptr_svc->listener_in_use = false;
+		__qseecom_clean_listener_sglistinfo(ptr_svc);
+		wake_up_interruptible(&ptr_svc->listener_block_app_wq);
+
+		if (ret) {
+			pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+				ret, data->client.app_id);
+			goto exit;
+		}
+
+		switch (resp->result) {
+		case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+			pr_warn("send lsr %d rsp, but app %d block on lsr %d\n",
+					lstnr, data->client.app_id, resp->data);
+			if (lstnr == resp->data) {
+				pr_err("lstnr %d should not be blocked!\n",
+					lstnr);
+				ret = -EINVAL;
+				goto exit;
+			}
+			ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, NULL, data);
+			if (ret) {
+				pr_err("failed to process App(%d) %s blocked on listener %d\n",
+					data->client.app_id,
+					data->client.app_name, resp->data);
+				goto exit;
+			}
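+			/* fall through: resume processing the unblocked request */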
+		case QSEOS_RESULT_SUCCESS:
+		case QSEOS_RESULT_INCOMPLETE:
+			break;
+		default:
+			pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
+				resp->result, data->client.app_id, lstnr);
+			ret = -EINVAL;
+			goto exit;
+		}
+exit:
+		if (lstnr == RPMB_SERVICE)
+			__qseecom_disable_clk(CLK_QSEE);
+
+	}
+	if (rc)
+		return rc;
+
+	return ret;
+}
+
+/*
+ * QSEE does not support OS-level cmd reentrancy until RE phase-3, and
+ * OS-level scm_call cmds will fail while any TZ app is blocked. So, before
+ * sending an OS-level scm call, check whether any app is blocked and, if
+ * so, wait until all apps are unblocked.
+ */
+static void __qseecom_reentrancy_check_if_no_app_blocked(uint32_t smc_id)
+{
+	sigset_t new_sigset, old_sigset;
+
+	if (qseecom.qsee_reentrancy_support > QSEE_REENTRANCY_PHASE_0 &&
+		qseecom.qsee_reentrancy_support < QSEE_REENTRANCY_PHASE_3 &&
+		IS_OWNER_TRUSTED_OS(TZ_SYSCALL_OWNER_ID(smc_id))) {
+		/* sleep until all apps are unblocked */
+		while (qseecom.app_block_ref_cnt > 0) {
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(qseecom.app_block_ref_cnt == 0)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+	}
+}
+
+/*
+ * An scm_call to send data will fail if this TA is blocked or if more than
+ * one TA is requesting listener services; so, first check whether this app
+ * needs to wait.
+ */
+static void __qseecom_reentrancy_check_if_this_app_blocked(
+			struct qseecom_registered_app_list *ptr_app)
+{
+	sigset_t new_sigset, old_sigset;
+
+	if (qseecom.qsee_reentrancy_support) {
+		while (ptr_app->app_blocked || qseecom.app_block_ref_cnt > 1) {
+			/* thread sleep until this app unblocked */
+			sigfillset(&new_sigset);
+			sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+			mutex_unlock(&app_access_lock);
+			do {
+				if (!wait_event_freezable(qseecom.app_block_wq,
+					(!ptr_app->app_blocked &&
+					qseecom.app_block_ref_cnt <= 1)))
+					break;
+			} while (1);
+			mutex_lock(&app_access_lock);
+			sigprocmask(SIG_SETMASK, &old_sigset, NULL);
+		}
+	}
+}
+
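+/*
+ * Look up an app_id by name: consult the local registered-app list first,
+ * and only if the app is not found there, issue an APP_LOOKUP scm call to
+ * QSEE. *app_id is left as 0 when the app does not exist anywhere.
+ */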
+static int __qseecom_check_app_exists(struct qseecom_check_app_ireq req,
+					uint32_t *app_id)
+{
+	int32_t ret;
+	struct qseecom_command_scm_resp resp;
+	bool found_app = false;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+
+	if (!app_id) {
+		pr_err("Null pointer to app_id\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+
+	/* check if app exists and has been registered locally */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(entry,
+			&qseecom.registered_app_list_head, list) {
+		if (!strcmp(entry->app_name, req.app_name)) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (found_app) {
+		pr_debug("Found app with id %d\n", entry->app_id);
+		*app_id = entry->app_id;
+		return 0;
+	}
+
+	memset((void *)&resp, 0, sizeof(resp));
+
+	/*  SCM_CALL  to check if app_id for the mentioned app exists */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_check_app_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to check if app is already loaded failed\n");
+		return -EINVAL;
+	}
+
+	if (resp.result == QSEOS_RESULT_FAILURE)
+		return 0;
+
+	switch (resp.resp_type) {
+	/*qsee returned listener type response */
+	case QSEOS_LISTENER_ID:
+		pr_err("resp type is of listener type instead of app");
+		return -EINVAL;
+	case QSEOS_APP_ID:
+		*app_id = resp.data;
+		return 0;
+	default:
+		pr_err("invalid resp type (%d) from qsee",
+				resp.resp_type);
+		return -ENODEV;
+	}
+}
+
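+/*
+ * Load a TZ app on behalf of userspace: ensure the matching common library
+ * (cmnlib/cmnlib64) is loaded, vote for bandwidth and clocks, and reuse an
+ * already-loaded app (bumping its ref count) when possible; otherwise
+ * import the app image from the supplied ION fd, issue an APP_START scm
+ * call, and register the new app in the local list.
+ */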
+static int qseecom_load_app(struct qseecom_dev_handle *data, void __user *argp)
+{
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	u32 app_id = 0;
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int32_t ret = 0;
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	bool first_time = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded &&
+				load_img_req.app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded &&
+				load_img_req.app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret)
+			return ret;
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret)
+		goto enable_clk_err;
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	load_img_req.img_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, load_img_req.img_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret < 0)
+		goto loadapp_err;
+
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+		&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+		&qseecom.registered_app_list_lock, flags);
+		ret = 0;
+	} else {
+		first_time = true;
+		pr_warn("App (%s) does'nt exist, loading apps for first time\n",
+			(char *)(load_img_req.img_name));
+		/* Get the handle of the shared fd */
+		ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					load_img_req.ifd_data_fd);
+		if (IS_ERR_OR_NULL(ihandle)) {
+			pr_err("Ion client could not retrieve the handle\n");
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+
+		/* Get the physical address of the ION BUF */
+		ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+		if (ret) {
+			pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+				ret);
+			goto loadapp_err;
+		}
+		if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+			pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+					len, load_img_req.mdt_len,
+					load_img_req.img_len);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+		/* Populate the structure for sending scm call to load image */
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req.mdt_len = load_img_req.mdt_len;
+			load_req.img_len = load_img_req.img_len;
+			strlcpy(load_req.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req.phy_addr = (uint32_t)pa;
+			cmd_buf = (void *)&load_req;
+			cmd_len = sizeof(struct qseecom_load_app_ireq);
+		} else {
+			load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+			load_req_64bit.mdt_len = load_img_req.mdt_len;
+			load_req_64bit.img_len = load_img_req.img_len;
+			strlcpy(load_req_64bit.app_name, load_img_req.img_name,
+						MAX_APP_NAME_SIZE);
+			load_req_64bit.phy_addr = (uint64_t)pa;
+			cmd_buf = (void *)&load_req_64bit;
+			cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+		}
+
+		ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+		if (ret) {
+			pr_err("cache operation failed %d\n", ret);
+			goto loadapp_err;
+		}
+
+		/*  SCM_CALL  to load the app and get the app_id back */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf,
+			cmd_len, &resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to load app failed\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EINVAL;
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+				if (!IS_ERR_OR_NULL(ihandle))
+					ion_free(qseecom.ion_clnt, ihandle);
+				ret = -EFAULT;
+				goto loadapp_err;
+			}
+		}
+
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call failed resp.result unknown, %d\n",
+				resp.result);
+			if (!IS_ERR_OR_NULL(ihandle))
+				ion_free(qseecom.ion_clnt, ihandle);
+			ret = -EFAULT;
+			goto loadapp_err;
+		}
+
+		app_id = resp.data;
+
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			ret = -ENOMEM;
+			goto loadapp_err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		entry->app_arch = load_img_req.app_arch;
+		/*
+		 * keymaster app may be first loaded as "keymaste" by qseecomd,
+		 * and then used as "keymaster" on some targets. To avoid app
+		 * name checking error, register "keymaster" into app_list and
+		 * thread private data.
+		 */
+		if (!strcmp(load_img_req.img_name, "keymaste"))
+			strlcpy(entry->app_name, "keymaster",
+					MAX_APP_NAME_SIZE);
+		else
+			strlcpy(entry->app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+
+		pr_warn("App with id %u (%s) now loaded\n", app_id,
+		(char *)(load_img_req.img_name));
+	}
+	data->client.app_id = app_id;
+	data->client.app_arch = load_img_req.app_arch;
+	if (!strcmp(load_img_req.img_name, "keymaste"))
+		strlcpy(data->client.app_name, "keymaster", MAX_APP_NAME_SIZE);
+	else
+		strlcpy(data->client.app_name, load_img_req.img_name,
+					MAX_APP_NAME_SIZE);
+	load_img_req.app_id = app_id;
+	if (copy_to_user(argp, &load_img_req, sizeof(load_img_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+		if (first_time == true) {
+			spin_lock_irqsave(
+				&qseecom.registered_app_list_lock, flags);
+			list_del(&entry->list);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+			kzfree(entry);
+		}
+	}
+
+loadapp_err:
+	__qseecom_disable_clk_scale_down(data);
+enable_clk_err:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+	return ret;
+}
+
+static int __qseecom_cleanup_app(struct qseecom_dev_handle *data)
+{
+	int ret = 1;	/* Set unload app */
+
+	wake_up_all(&qseecom.send_resp_wq);
+	if (qseecom.qsee_reentrancy_support)
+		mutex_unlock(&app_access_lock);
+	while (atomic_read(&data->ioctl_count) > 1) {
+		if (wait_event_freezable(data->abort_wq,
+					atomic_read(&data->ioctl_count) <= 1)) {
+			pr_err("Interrupted from abort\n");
+			ret = -ERESTARTSYS;
+			break;
+		}
+	}
+	if (qseecom.qsee_reentrancy_support)
+		mutex_lock(&app_access_lock);
+	return ret;
+}
+
+static int qseecom_unmap_ion_allocated_memory(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	if (!IS_ERR_OR_NULL(data->client.ihandle)) {
+		ion_unmap_kernel(qseecom.ion_clnt, data->client.ihandle);
+		ion_free(qseecom.ion_clnt, data->client.ihandle);
+		data->client.ihandle = NULL;
+	}
+	return ret;
+}
+
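+/*
+ * Unload a TZ app: drain outstanding ioctls, drop the app's ref count, and
+ * issue an APP_SHUTDOWN scm call once the last reference (or a crashed
+ * app) goes away. The keymaster app is deliberately never unloaded.
+ */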
+static int qseecom_unload_app(struct qseecom_dev_handle *data,
+				bool app_crash)
+{
+	unsigned long flags;
+	unsigned long flags1;
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_registered_app_list *ptr_app = NULL;
+	bool unload = false;
+	bool found_app = false;
+	bool found_dead_app = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (!memcmp(data->client.app_name, "keymaste", strlen("keymaste"))) {
+		pr_debug("Do not unload keymaster app from tz\n");
+		goto unload_exit;
+	}
+
+	__qseecom_cleanup_app(data);
+	__qseecom_reentrancy_check_if_no_app_blocked(TZ_OS_APP_SHUTDOWN_ID);
+
+	if (data->client.app_id > 0) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+									list) {
+			if (ptr_app->app_id == data->client.app_id) {
+				if (!strcmp((void *)ptr_app->app_name,
+					(void *)data->client.app_name)) {
+					found_app = true;
+					if (app_crash || ptr_app->ref_cnt == 1)
+						unload = true;
+					break;
+				}
+				found_dead_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags);
+		if (found_app == false && found_dead_app == false) {
+			pr_err("Cannot find app with id = %d (%s)\n",
+				data->client.app_id,
+				(char *)data->client.app_name);
+			ret = -EINVAL;
+			goto unload_exit;
+		}
+	}
+
+	if (found_dead_app)
+		pr_warn("cleanup app_id %d(%s)\n", data->client.app_id,
+			(char *)data->client.app_name);
+
+	if (unload) {
+		struct qseecom_unload_app_ireq req;
+		/* Populate the structure for sending scm call to unload app */
+		req.qsee_cmd_id = QSEOS_APP_SHUTDOWN_COMMAND;
+		req.app_id = data->client.app_id;
+
+		/* SCM_CALL to unload the app */
+		ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+				sizeof(struct qseecom_unload_app_ireq),
+				&resp, sizeof(resp));
+		if (ret) {
+			pr_err("scm_call to unload app (id = %d) failed\n",
+								req.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		} else {
+			pr_warn("App id %d now unloaded\n", req.app_id);
+		}
+		if (resp.result == QSEOS_RESULT_FAILURE) {
+			pr_err("app (%d) unload_failed!!\n",
+					data->client.app_id);
+			ret = -EFAULT;
+			goto unload_exit;
+		}
+		if (resp.result == QSEOS_RESULT_SUCCESS)
+			pr_debug("App (%d) is unloaded!!\n",
+					data->client.app_id);
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd fail err: %d\n",
+									ret);
+				goto unload_exit;
+			}
+		}
+	}
+
+	if (found_app) {
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags1);
+		if (app_crash) {
+			ptr_app->ref_cnt = 0;
+			pr_debug("app_crash: ref_count = 0\n");
+		} else {
+			if (ptr_app->ref_cnt == 1) {
+				ptr_app->ref_cnt = 0;
+				pr_debug("ref_count set to 0\n");
+			} else {
+				ptr_app->ref_cnt--;
+				pr_debug("Can't unload app(%d) inuse\n",
+					ptr_app->app_id);
+			}
+		}
+		if (unload) {
+			list_del(&ptr_app->list);
+			kzfree(ptr_app);
+		}
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+								flags1);
+	}
+unload_exit:
+	qseecom_unmap_ion_allocated_memory(data);
+	data->released = true;
+	return ret;
+}
+
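+/*
+ * Translate a user virtual address inside the registered shared buffer
+ * into the corresponding physical (or kernel virtual) address, using its
+ * offset from the recorded user-space base.
+ */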
+static phys_addr_t __qseecom_uvirt_to_kphys(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return data->client.sb_phys + (virt - data->client.user_virt_sb_base);
+}
+
+static uintptr_t __qseecom_uvirt_to_kvirt(struct qseecom_dev_handle *data,
+						unsigned long virt)
+{
+	return (uintptr_t)data->client.sb_virt +
+				(virt - data->client.user_virt_sb_base);
+}
+
+int __qseecom_process_rpmb_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_service_ireq *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	void *req_buf = NULL;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	/* Clients need to ensure req_buf is at base offset of shared buffer */
+	if ((uintptr_t)req_ptr->cmd_req_buf !=
+			data_ptr->client.user_virt_sb_base) {
+		pr_err("cmd buf not pointing to base offset of shared buffer\n");
+		return -EINVAL;
+	}
+
+	if (data_ptr->client.sb_length <
+			sizeof(struct qseecom_rpmb_provision_key)) {
+		pr_err("shared buffer is too small to hold key type\n");
+		return -EINVAL;
+	}
+	req_buf = data_ptr->client.sb_virt;
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->key_type =
+		((struct qseecom_rpmb_provision_key *)req_buf)->key_type;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	return ret;
+}
+
+int __qseecom_process_fsm_key_svc_cmd(struct qseecom_dev_handle *data_ptr,
+		struct qseecom_send_svc_cmd_req *req_ptr,
+		struct qseecom_client_send_fsm_key_req *send_svc_ireq_ptr)
+{
+	int ret = 0;
+	uint32_t reqd_len_sb_in = 0;
+
+	if ((req_ptr == NULL) || (send_svc_ireq_ptr == NULL)) {
+		pr_err("Error with pointer: req_ptr = %pK, send_svc_ptr = %pK\n",
+			req_ptr, send_svc_ireq_ptr);
+		return -EINVAL;
+	}
+
+	reqd_len_sb_in = req_ptr->cmd_req_len + req_ptr->resp_len;
+	if (reqd_len_sb_in > data_ptr->client.sb_length) {
+		pr_err("Not enough memory to fit cmd_buf and resp_buf. ");
+		pr_err("Required: %u, Available: %zu\n",
+				reqd_len_sb_in, data_ptr->client.sb_length);
+		return -ENOMEM;
+	}
+
+	send_svc_ireq_ptr->qsee_cmd_id = req_ptr->cmd_id;
+	send_svc_ireq_ptr->req_len = req_ptr->cmd_req_len;
+	send_svc_ireq_ptr->rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->resp_buf));
+	send_svc_ireq_ptr->rsp_len = req_ptr->resp_len;
+
+	send_svc_ireq_ptr->req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+			data_ptr, (uintptr_t)req_ptr->cmd_req_buf));
+
+
+	return ret;
+}
+
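+/*
+ * Sanity-check a service command request: both cmd and resp buffers must
+ * lie entirely within the client's registered shared buffer, the lengths
+ * must be non-zero and fit in that buffer, and none of the pointer/length
+ * arithmetic may overflow.
+ */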
+static int __validate_send_service_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_svc_cmd_req *req)
+{
+	if (!req || !req->resp_buf || !req->cmd_req_buf) {
+		pr_err("req or cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_virt == NULL) {
+		pr_err("sb_virt null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base == 0) {
+		pr_err("user_virt_sb_base is null\n");
+		return -EINVAL;
+	}
+
+	if (data->client.sb_length == 0) {
+		pr_err("sb_length is 0\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) || (req->resp_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+					data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int qseecom_send_service_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	int ret = 0;
+	struct qseecom_client_send_service_ireq send_svc_ireq;
+	struct qseecom_client_send_fsm_key_req send_fsm_key_svc_ireq;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_send_svc_cmd_req req;
+	void   *send_req_ptr;
+	size_t req_buf_size;
+
+	if (copy_from_user(&req,
+				(void __user *)argp,
+				sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_service_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	data->type = QSEECOM_SECURE_SERVICE;
+
+	switch (req.cmd_id) {
+	case QSEOS_RPMB_PROVISION_KEY_COMMAND:
+	case QSEOS_RPMB_ERASE_COMMAND:
+	case QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND:
+		send_req_ptr = &send_svc_ireq;
+		req_buf_size = sizeof(send_svc_ireq);
+		if (__qseecom_process_rpmb_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	case QSEOS_FSM_LTEOTA_REQ_CMD:
+	case QSEOS_FSM_LTEOTA_REQ_RSP_CMD:
+	case QSEOS_FSM_IKE_REQ_CMD:
+	case QSEOS_FSM_IKE_REQ_RSP_CMD:
+	case QSEOS_FSM_OEM_FUSE_WRITE_ROW:
+	case QSEOS_FSM_OEM_FUSE_READ_ROW:
+	case QSEOS_FSM_ENCFS_REQ_CMD:
+	case QSEOS_FSM_ENCFS_REQ_RSP_CMD:
+		send_req_ptr = &send_fsm_key_svc_ireq;
+		req_buf_size = sizeof(send_fsm_key_svc_ireq);
+		if (__qseecom_process_fsm_key_svc_cmd(data, &req,
+				send_req_ptr))
+			return -EINVAL;
+		break;
+	default:
+		pr_err("Unsupported cmd_id %d\n", req.cmd_id);
+		return -EINVAL;
+	}
+
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(HIGH);
+		if (ret) {
+			pr_err("Fail to set bw HIGH\n");
+			return ret;
+		}
+	} else {
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clocks with err %d\n", ret);
+			goto exit;
+		}
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				(const void *)send_req_ptr,
+				req_buf_size, &resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed with err: %d\n", ret);
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		}
+		goto exit;
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit;
+	}
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_debug("qseos_result_incomplete\n");
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd fail with result: %d\n",
+				resp.result);
+		}
+		if (req.cmd_id == QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND) {
+			pr_warn("RPMB key status is 0x%x\n", resp.result);
+			*(uint32_t *)req.resp_buf = resp.result;
+			ret = 0;
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with resp.result: %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	default:
+		pr_err("Response result %d not supported\n",
+				resp.result);
+		ret = -EINVAL;
+		break;
+	}
+	if (!qseecom.support_bus_scaling) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	} else {
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+	}
+
+exit:
+	return ret;
+}
+
+static int __validate_send_cmd_inputs(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+	if (((req->resp_buf == NULL) && (req->resp_len != 0)) ||
+						(req->cmd_req_buf == NULL)) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->cmd_req_buf <
+				data->client.user_virt_sb_base) ||
+		((uintptr_t)req->cmd_req_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->resp_buf <
+				data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_buf >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+	if ((req->cmd_req_len == 0) ||
+		(req->cmd_req_len > data->client.sb_length) ||
+		(req->resp_len > data->client.sb_length)) {
+		pr_err("cmd buf length or response buf length not valid\n");
+		return -EINVAL;
+	}
+	if (req->cmd_req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if ((req->cmd_req_len + req->resp_len) > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+				(req->cmd_req_len + req->resp_len),
+					data->client.sb_length);
+		return -ENOMEM;
+	}
+	if ((uintptr_t)req->cmd_req_buf > (ULONG_MAX - req->cmd_req_len)) {
+		pr_err("Integer overflow in req_len & cmd_req_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)req->resp_buf > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->cmd_req_buf + req->cmd_req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_buf + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int __qseecom_process_reentrancy(struct qseecom_command_scm_resp *resp,
+				struct qseecom_registered_app_list *ptr_app,
+				struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+
+	switch (resp->result) {
+	case QSEOS_RESULT_BLOCKED_ON_LISTENER:
+		pr_warn("App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name,
+			resp->data);
+		ret = __qseecom_process_reentrancy_blocked_on_listener(
+					resp, ptr_app, data);
+		if (ret) {
+			pr_err("failed to process App(%d) %s is blocked on listener %d\n",
+			data->client.app_id, data->client.app_name, resp->data);
+			return ret;
+		}
+
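+		/* fall through: the unblocked request is now INCOMPLETE */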
+	case QSEOS_RESULT_INCOMPLETE:
+		qseecom.app_block_ref_cnt++;
+		ptr_app->app_blocked = true;
+		ret = __qseecom_reentrancy_process_incomplete_cmd(data, resp);
+		ptr_app->app_blocked = false;
+		qseecom.app_block_ref_cnt--;
+		wake_up_interruptible(&qseecom.app_block_wq);
+		if (ret)
+			pr_err("process_incomplete_cmd failed err: %d\n",
+					ret);
+		return ret;
+	case QSEOS_RESULT_SUCCESS:
+		return ret;
+	default:
+		pr_err("Response result %d not supported\n",
+						resp->result);
+		return -EINVAL;
+	}
+}
+
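+/*
+ * Send a client command to its TZ app: build the 32-bit or 64-bit ireq
+ * (embedding the flushed sglist info table), clean the shared buffer from
+ * the cache, issue the scm call, then handle reentrancy/incomplete results
+ * and invalidate the shared buffer before returning.
+ */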
+static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_send_cmd_req *req)
+{
+	int ret = 0;
+	u32 reqd_len_sb_in = 0;
+	struct qseecom_client_send_data_ireq send_data_req = {0};
+	struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
+	struct qseecom_command_scm_resp resp;
+	unsigned long flags;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+
+	reqd_len_sb_in = req->cmd_req_len + req->resp_len;
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		send_data_req.app_id = data->client.app_id;
+		send_data_req.req_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->cmd_req_buf));
+		send_data_req.req_len = req->cmd_req_len;
+		send_data_req.rsp_ptr = (uint32_t)(__qseecom_uvirt_to_kphys(
+					data, (uintptr_t)req->resp_buf));
+		send_data_req.rsp_len = req->resp_len;
+		send_data_req.sglistinfo_ptr =
+				(uint32_t)virt_to_phys(table);
+		send_data_req.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req;
+		cmd_len = sizeof(struct qseecom_client_send_data_ireq);
+	} else {
+		send_data_req_64bit.app_id = data->client.app_id;
+		send_data_req_64bit.req_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->cmd_req_buf);
+		send_data_req_64bit.req_len = req->cmd_req_len;
+		send_data_req_64bit.rsp_ptr = __qseecom_uvirt_to_kphys(data,
+					(uintptr_t)req->resp_buf);
+		send_data_req_64bit.rsp_len = req->resp_len;
+		/* check if 32bit app's phys_addr region is under 4GB.*/
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((send_data_req_64bit.req_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.req_len) ||
+			(send_data_req_64bit.rsp_ptr >=
+				PHY_ADDR_4G - send_data_req_64bit.rsp_len))){
+			pr_err("32bit app %s PA exceeds 4G: req_ptr=%llx, req_len=%x, rsp_ptr=%llx, rsp_len=%x\n",
+				data->client.app_name,
+				send_data_req_64bit.req_ptr,
+				send_data_req_64bit.req_len,
+				send_data_req_64bit.rsp_ptr,
+				send_data_req_64bit.rsp_len);
+			return -EFAULT;
+		}
+		send_data_req_64bit.sglistinfo_ptr =
+				(uint64_t)virt_to_phys(table);
+		send_data_req_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&send_data_req_64bit;
+		cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
+	}
+
+	if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret)
+		pr_err("cache operation failed %d\n", ret);
+	return ret;
+}
+
+static int qseecom_send_cmd(struct qseecom_dev_handle *data, void __user *argp)
+{
+	struct qseecom_send_cmd_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	return __qseecom_send_cmd(data, &req);
+}
+
+int __boundary_checks_offset(struct qseecom_send_modfd_cmd_req *req,
+			struct qseecom_send_modfd_listener_resp *lstnr_resp,
+			struct qseecom_dev_handle *data, int i)
+{
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+		if ((req->cmd_req_len < sizeof(uint32_t)) ||
+			(req->ifd_data[i].cmd_buf_offset >
+			req->cmd_req_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (req len) 0x%x\n",
+				req->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+		if ((lstnr_resp->resp_len < sizeof(uint32_t)) ||
+			(lstnr_resp->ifd_data[i].cmd_buf_offset >
+			lstnr_resp->resp_len - sizeof(uint32_t))) {
+			pr_err("Invalid offset (lstnr resp len) 0x%x\n",
+				lstnr_resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
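+/*
+ * For each ION fd attached to a modfd request, resolve its scatter-gather
+ * table and patch the physical address(es) into the cmd/resp buffer at the
+ * given offset (or zero them out on cleanup), performing the required
+ * cache maintenance and recording sglist info for whitelist support.
+ */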
+static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+						lstnr_resp->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("IOn client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_err("Num of scattered entries");
+			pr_err(" (%d) is greater than max supported %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint32_t *update;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				/*
+				 * Check if sg list phy add region is under 4GB
+				 */
+				if ((qseecom.qsee_version >= QSEE_VERSION_40) &&
+					(!cleanup) &&
+					((uint64_t)sg_dma_address(sg_ptr->sgl)
+					>= PHY_ADDR_4G - sg->length)) {
+					pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+						data->client.app_name,
+						&(sg_dma_address(sg_ptr->sgl)),
+						sg->length);
+					goto err;
+				}
+				update = (uint32_t *) field;
+				*update = cleanup ? 0 :
+					(uint32_t)sg_dma_address(sg_ptr->sgl);
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry *update;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+						(req->cmd_req_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+						SG_ENTRY_SZ * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+						SG_ENTRY_SZ * sg_ptr->nents))) {
+					goto err;
+				}
+			}
+			if ((data->type == QSEECOM_CLIENT_APP &&
+				(data->client.app_arch == ELFCLASS32 ||
+				data->client.app_arch == ELFCLASS64)) ||
+				(data->type == QSEECOM_LISTENER_SERVICE)) {
+				update = (struct qseecom_sg_entry *)field;
+				for (j = 0; j < sg_ptr->nents; j++) {
+					/*
+					 * Check if sg list PA is under 4GB
+					 */
+					if ((qseecom.qsee_version >=
+						QSEE_VERSION_40) &&
+						(!cleanup) &&
+						((uint64_t)(sg_dma_address(sg))
+						>= PHY_ADDR_4G - sg->length)) {
+						pr_err("App %s sgl PA exceeds 4G: phy_addr=%pKad, len=%x\n",
+							data->client.app_name,
+							&(sg_dma_address(sg)),
+							sg->length);
+						goto err;
+					}
+					update->phys_addr = cleanup ? 0 :
+						(uint32_t)sg_dma_address(sg);
+					update->len = cleanup ? 0 : sg->length;
+					update++;
+					len += sg->length;
+					sg = sg_next(sg);
+				}
+			} else {
+				pr_err("QSEE app arch %u is not supported\n",
+							data->client.app_arch);
+				goto err;
+			}
+		}
+
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 0, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
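+/*
+ * When an ION buffer carries more scatter-gather entries than fit in the
+ * command buffer (QSEECOM_MAX_SG_ENTRY), spill the full entry list into a
+ * freshly allocated DMA-coherent buffer and leave a v2 list-buffer header
+ * at the original location pointing at it.
+ */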
+static int __qseecom_allocate_sg_list_buffer(struct qseecom_dev_handle *data,
+		char *field, uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry_64bit *sg_entry;
+	struct qseecom_sg_list_buf_hdr_64bit *buf_hdr;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	buf_hdr = (struct qseecom_sg_list_buf_hdr_64bit *)field;
+	memset((void *)buf_hdr, 0, QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT);
+	/* Allocate a contiguous kernel buffer */
+	size = sg_ptr->nents * SG_ENTRY_SZ_64BIT;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.pdev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	/* update qseecom_sg_list_buf_hdr_64bit */
+	buf_hdr->version = QSEECOM_SG_LIST_BUF_FORMAT_VERSION_2;
+	buf_hdr->new_buf_phys_addr = coh_pmem;
+	buf_hdr->nents_total = sg_ptr->nents;
+	/* save the left sg entries into new allocated buf */
+	sg_entry = (struct qseecom_sg_entry_64bit *)buf;
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint64_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+
+	return 0;
+}
+
+static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
+			struct qseecom_dev_handle *data)
+{
+	struct ion_handle *ihandle;
+	char *field;
+	int ret = 0;
+	int i = 0;
+	uint32_t len = 0;
+	struct scatterlist *sg;
+	struct qseecom_send_modfd_cmd_req *req = NULL;
+	struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+	uint32_t offset;
+	struct sg_table *sg_ptr;
+
+	if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP))
+		return -EFAULT;
+
+	if (msg == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	if (data->type == QSEECOM_LISTENER_SERVICE) {
+		lstnr_resp = (struct qseecom_send_modfd_listener_resp *)msg;
+		this_lstnr = __qseecom_find_svc(data->listener.id);
+		if (IS_ERR_OR_NULL(this_lstnr)) {
+			pr_err("Invalid listener ID\n");
+			return -ENOMEM;
+		}
+	} else {
+		req = (struct qseecom_send_modfd_cmd_req *)msg;
+	}
+
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+						(req->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = (char *) req->cmd_req_buf +
+				req->ifd_data[i].cmd_buf_offset;
+		} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+				(lstnr_resp->ifd_data[i].fd > 0)) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+						lstnr_resp->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			field = lstnr_resp->resp_buf_ptr +
+				lstnr_resp->ifd_data[i].cmd_buf_offset;
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("Ion client could not retrieve sg table\n");
+			goto err;
+		}
+		if (sg_ptr->nents == 0) {
+			pr_err("Num of scattered entries is 0\n");
+			goto err;
+		}
+		if (sg_ptr->nents > QSEECOM_MAX_SG_ENTRY) {
+			pr_warn("Num of scattered entries (%d) is greater than %d\n",
+				sg_ptr->nents, QSEECOM_MAX_SG_ENTRY);
+			if (cleanup) {
+				if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+					data->client.sec_buf_fd[i].vbase)
+					dma_free_coherent(qseecom.pdev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			} else {
+				ret = __qseecom_allocate_sg_list_buffer(data,
+						field, i, sg_ptr);
+				if (ret) {
+					pr_err("Failed to allocate sg list buffer\n");
+					goto err;
+				}
+			}
+			len = QSEECOM_SG_LIST_BUF_HDR_SZ_64BIT;
+			sg = sg_ptr->sgl;
+			goto cleanup;
+		}
+		sg = sg_ptr->sgl;
+		if (sg_ptr->nents == 1) {
+			uint64_t *update_64bit;
+
+			if (__boundary_checks_offset(req, lstnr_resp, data, i))
+				goto err;
+			/* 64bit app uses 64bit address */
+			update_64bit = (uint64_t *) field;
+			*update_64bit = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg_ptr->sgl);
+			len += (uint32_t)sg->length;
+		} else {
+			struct qseecom_sg_entry_64bit *update_64bit;
+			int j = 0;
+
+			if ((data->type != QSEECOM_LISTENER_SERVICE) &&
+					(req->ifd_data[i].fd > 0)) {
+
+				if ((req->cmd_req_len <
+					 SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+					(req->ifd_data[i].cmd_buf_offset >
+					(req->cmd_req_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					req->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+
+			} else if ((data->type == QSEECOM_LISTENER_SERVICE) &&
+					(lstnr_resp->ifd_data[i].fd > 0)) {
+
+				if ((lstnr_resp->resp_len <
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents) ||
+				(lstnr_resp->ifd_data[i].cmd_buf_offset >
+						(lstnr_resp->resp_len -
+					SG_ENTRY_SZ_64BIT * sg_ptr->nents))) {
+					pr_err("Invalid offset = 0x%x\n",
+					lstnr_resp->ifd_data[i].cmd_buf_offset);
+					goto err;
+				}
+			}
+			/* 64bit app uses 64bit address */
+			update_64bit = (struct qseecom_sg_entry_64bit *)field;
+			for (j = 0; j < sg_ptr->nents; j++) {
+				update_64bit->phys_addr = cleanup ? 0 :
+					(uint64_t)sg_dma_address(sg);
+				update_64bit->len = cleanup ? 0 :
+						(uint32_t)sg->length;
+				update_64bit++;
+				len += sg->length;
+				sg = sg_next(sg);
+			}
+		}
+cleanup:
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+					ihandle, NULL, len,
+					ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			if (data->type == QSEECOM_CLIENT_APP) {
+				offset = req->ifd_data[i].cmd_buf_offset;
+				data->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				data->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				data->sglist_cnt = i + 1;
+			} else {
+				offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+					+ (uintptr_t)lstnr_resp->resp_buf_ptr -
+					(uintptr_t)this_lstnr->sb_virt);
+				this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+					SGLISTINFO_SET_INDEX_FLAG(
+					(sg_ptr->nents == 1), 1, offset);
+				this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+					(sg_ptr->nents == 1) ?
+					sg->length : sg_ptr->nents;
+				this_lstnr->sglist_cnt = i + 1;
+			}
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	for (i = 0; i < MAX_ION_FD; i++)
+		if (data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			data->client.sec_buf_fd[i].vbase)
+			dma_free_coherent(qseecom.pdev,
+				data->client.sec_buf_fd[i].size,
+				data->client.sec_buf_fd[i].vbase,
+				data->client.sec_buf_fd[i].pbase);
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
+static int __qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp,
+					bool is_64bit_addr)
+{
+	int ret = 0;
+	int i;
+	struct qseecom_send_modfd_cmd_req req;
+	struct qseecom_send_cmd_req send_cmd_req;
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	send_cmd_req.cmd_req_buf = req.cmd_req_buf;
+	send_cmd_req.cmd_req_len = req.cmd_req_len;
+	send_cmd_req.resp_buf = req.resp_buf;
+	send_cmd_req.resp_len = req.resp_len;
+
+	if (__validate_send_cmd_inputs(data, &send_cmd_req))
+		return -EINVAL;
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].cmd_buf_offset >= req.cmd_req_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, req.ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+	req.cmd_req_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.cmd_req_buf);
+	req.resp_buf = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_buf);
+
+	if (!is_64bit_addr) {
+		ret = __qseecom_update_cmd_buf(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf(&req, true, data);
+		if (ret)
+			return ret;
+	} else {
+		ret = __qseecom_update_cmd_buf_64(&req, false, data);
+		if (ret)
+			return ret;
+		ret = __qseecom_send_cmd(data, &send_cmd_req);
+		if (ret)
+			return ret;
+		ret = __qseecom_update_cmd_buf_64(&req, true, data);
+		if (ret)
+			return ret;
+	}
+
+	return ret;
+}
+
+static int qseecom_send_modfd_cmd(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, false);
+}
+
+static int qseecom_send_modfd_cmd_64(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	return __qseecom_send_modfd_cmd(data, argp, true);
+}
+
+static int __qseecom_listener_has_rcvd_req(struct qseecom_dev_handle *data,
+		struct qseecom_registered_listener_list *svc)
+{
+	int ret;
+
+	ret = (svc->rcv_req_flag != 0);
+	return ret || data->abort;
+}
+
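+/*
+ * Blocks a registered listener until either TZ posts a request for it
+ * (rcv_req_flag is set elsewhere in the driver) or the handle is aborted.
+ * wait_event_freezable() returning nonzero means the sleep was interrupted
+ * by a signal, so -ERESTARTSYS is propagated instead of a request.
+ */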
+static int qseecom_receive_req(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_registered_listener_list *this_lstnr;
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (!this_lstnr) {
+		pr_err("Invalid listener ID\n");
+		return -ENODATA;
+	}
+
+	while (1) {
+		if (wait_event_freezable(this_lstnr->rcv_req_wq,
+				__qseecom_listener_has_rcvd_req(data,
+				this_lstnr))) {
+			pr_debug("Interrupted: exiting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			/* woken up for different reason */
+			return -ERESTARTSYS;
+		}
+
+		if (data->abort) {
+			pr_err("Aborting Listener Service = %d\n",
+						(uint32_t)data->listener.id);
+			return -ENODEV;
+		}
+		this_lstnr->rcv_req_flag = 0;
+		break;
+	}
+	return ret;
+}
+
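+/*
+ * Minimal ELF sanity checking on the split-image .mdt blob: the header must
+ * fit, carry the ELF magic, declare at least one program header, and the
+ * whole program header table must lie inside the blob.  For instance, an
+ * elf32 image with e_phnum == 3 needs at least
+ * sizeof(struct elf32_hdr) + 3 * sizeof(struct elf32_phdr) bytes to pass.
+ */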
+static bool __qseecom_is_fw_image_valid(const struct firmware *fw_entry)
+{
+	unsigned char app_arch = 0;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+
+	switch (app_arch) {
+	case ELFCLASS32: {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr)) {
+			pr_err("%s: Not big enough to be an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf32 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf32_phdr) * ehdr->e_phnum +
+		    sizeof(struct elf32_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	case ELFCLASS64: {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		if (fw_entry->size < sizeof(*ehdr64)) {
+			pr_err("%s: Not big enough to be an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (memcmp(ehdr64->e_ident, ELFMAG, SELFMAG)) {
+			pr_err("%s: Not an elf64 header\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (ehdr64->e_phnum == 0) {
+			pr_err("%s: No loadable segments\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		if (sizeof(struct elf64_phdr) * ehdr64->e_phnum +
+		    sizeof(struct elf64_hdr) > fw_entry->size) {
+			pr_err("%s: Program headers not within mdt\n",
+					 qseecom.pdev->init_name);
+			return false;
+		}
+		break;
+	}
+	default: {
+		pr_err("QSEE app arch %u is not supported\n", app_arch);
+		return false;
+	}
+	}
+	return true;
+}
+
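+/*
+ * Split-image firmware naming convention: "<app>.mdt" holds the ELF headers
+ * and each program header i has a matching "<app>.b%02d" blob.  E.g. a
+ * "sampleapp" image with three program headers is requested as
+ * sampleapp.mdt, sampleapp.b00, sampleapp.b01 and sampleapp.b02, and
+ * *fw_size accumulates the total across all of them (with overflow checks).
+ */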
+static int __qseecom_get_fw_size(const char *appname, uint32_t *fw_size,
+					uint32_t *app_arch)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name,  qseecom.pdev);
+	if (rc) {
+		pr_err("error with request_firmware\n");
+		ret = -EIO;
+		goto err;
+	}
+	if (!__qseecom_is_fw_image_valid(fw_entry)) {
+		ret = -EIO;
+		goto err;
+	}
+	*app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	*fw_size = fw_entry->size;
+	if (*app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (*app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, *app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	pr_debug("QSEE %s app, arch %u\n", appname, *app_arch);
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		memset(fw_name, 0, sizeof(fw_name));
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name, qseecom.pdev);
+		if (ret)
+			goto err;
+		if (*fw_size > U32_MAX - fw_entry->size) {
+			pr_err("QSEE %s app file size overflow\n", appname);
+			ret = -EINVAL;
+			goto err;
+		}
+		*fw_size += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+
+	return ret;
+err:
+	if (fw_entry)
+		release_firmware(fw_entry);
+	*fw_size = 0;
+	return ret;
+}
+
+static int __qseecom_get_fw_data(const char *appname, u8 *img_data,
+				uint32_t fw_size,
+				struct qseecom_load_app_ireq *load_req)
+{
+	int ret = -1;
+	int i = 0, rc = 0;
+	const struct firmware *fw_entry = NULL;
+	char fw_name[MAX_APP_NAME_SIZE];
+	u8 *img_data_ptr = img_data;
+	struct elf32_hdr *ehdr;
+	struct elf64_hdr *ehdr64;
+	int num_images = 0;
+	unsigned char app_arch = 0;
+
+	snprintf(fw_name, sizeof(fw_name), "%s.mdt", appname);
+	rc = request_firmware(&fw_entry, fw_name,  qseecom.pdev);
+	if (rc) {
+		ret = -EIO;
+		goto err;
+	}
+
+	load_req->img_len = fw_entry->size;
+	if (load_req->img_len > fw_size) {
+		pr_err("app %s size %zu is larger than buf size %u\n",
+			appname, fw_entry->size, fw_size);
+		ret = -EINVAL;
+		goto err;
+	}
+	memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+	img_data_ptr = img_data_ptr + fw_entry->size;
+	load_req->mdt_len = fw_entry->size; /* MDT length */
+
+	app_arch = *(unsigned char *)(fw_entry->data + EI_CLASS);
+	if (app_arch == ELFCLASS32) {
+		ehdr = (struct elf32_hdr *)fw_entry->data;
+		num_images = ehdr->e_phnum;
+	} else if (app_arch == ELFCLASS64) {
+		ehdr64 = (struct elf64_hdr *)fw_entry->data;
+		num_images = ehdr64->e_phnum;
+	} else {
+		pr_err("QSEE %s app, arch %u is not supported\n",
+						appname, app_arch);
+		ret = -EIO;
+		goto err;
+	}
+	release_firmware(fw_entry);
+	fw_entry = NULL;
+	for (i = 0; i < num_images; i++) {
+		snprintf(fw_name, ARRAY_SIZE(fw_name), "%s.b%02d", appname, i);
+		ret = request_firmware(&fw_entry, fw_name,  qseecom.pdev);
+		if (ret) {
+			pr_err("Failed to locate blob %s\n", fw_name);
+			goto err;
+		}
+		if ((fw_entry->size > U32_MAX - load_req->img_len) ||
+			(fw_entry->size + load_req->img_len > fw_size)) {
+			pr_err("Invalid file size for %s\n", fw_name);
+			ret = -EINVAL;
+			goto err;
+		}
+		memcpy(img_data_ptr, fw_entry->data, fw_entry->size);
+		img_data_ptr = img_data_ptr + fw_entry->size;
+		load_req->img_len += fw_entry->size;
+		release_firmware(fw_entry);
+		fw_entry = NULL;
+	}
+	return ret;
+err:
+	release_firmware(fw_entry);
+	return ret;
+}
+
+static int __qseecom_allocate_img_data(struct ion_handle **pihandle,
+			u8 **data, uint32_t fw_size, ion_phys_addr_t *paddr)
+{
+	size_t len = 0;
+	int ret = 0;
+	ion_phys_addr_t pa;
+	struct ion_handle *ihandle = NULL;
+	u8 *img_data = NULL;
+
+	ihandle = ion_alloc(qseecom.ion_clnt, fw_size,
+			SZ_4K, ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+
+	if (IS_ERR_OR_NULL(ihandle)) {
+		pr_err("ION alloc failed\n");
+		return -ENOMEM;
+	}
+	img_data = (u8 *)ion_map_kernel(qseecom.ion_clnt,
+					ihandle);
+
+	if (IS_ERR_OR_NULL(img_data)) {
+		pr_err("ION memory mapping for image loading failed\n");
+		ret = -ENOMEM;
+		goto exit_ion_free;
+	}
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+	if (ret) {
+		pr_err("physical memory retrieval failure\n");
+		ret = -EIO;
+		goto exit_ion_unmap_kernel;
+	}
+
+	*pihandle = ihandle;
+	*data = img_data;
+	*paddr = pa;
+	return ret;
+
+exit_ion_unmap_kernel:
+	ion_unmap_kernel(qseecom.ion_clnt, ihandle);
+exit_ion_free:
+	ion_free(qseecom.ion_clnt, ihandle);
+	ihandle = NULL;
+	return ret;
+}
+
+static void __qseecom_free_img_data(struct ion_handle **ihandle)
+{
+	ion_unmap_kernel(qseecom.ion_clnt, *ihandle);
+	ion_free(qseecom.ion_clnt, *ihandle);
+	*ihandle = NULL;
+}
+
+static int __qseecom_load_fw(struct qseecom_dev_handle *data, char *appname,
+				uint32_t *app_id)
+{
+	int ret = -1;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	ion_phys_addr_t pa = 0;
+	struct ion_handle *ihandle = NULL;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!data || !appname || !app_id) {
+		pr_err("Null pointer to data or appname or appid\n");
+		return -EINVAL;
+	}
+	*app_id = 0;
+	if (__qseecom_get_fw_size(appname, &fw_size, &app_arch))
+		return -EIO;
+	data->client.app_arch = app_arch;
+
+	/* Check and load cmnlib */
+	if (qseecom.qsee_version > QSEEE_VERSION_00) {
+		if (!qseecom.commonlib_loaded && app_arch == ELFCLASS32) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib");
+			if (ret) {
+				pr_err("failed to load cmnlib\n");
+				return -EIO;
+			}
+			qseecom.commonlib_loaded = true;
+			pr_debug("cmnlib is loaded\n");
+		}
+
+		if (!qseecom.commonlib64_loaded && app_arch == ELFCLASS64) {
+			ret = qseecom_load_commonlib_image(data, "cmnlib64");
+			if (ret) {
+				pr_err("failed to load cmnlib64\n");
+				return -EIO;
+			}
+			qseecom.commonlib64_loaded = true;
+			pr_debug("cmnlib64 is loaded\n");
+		}
+	}
+
+	ret = __qseecom_allocate_img_data(&ihandle, &img_data, fw_size, &pa);
+	if (ret)
+		return ret;
+
+	ret = __qseecom_get_fw_data(appname, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+
+	/* Populate the load_req parameters */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		strlcpy(load_req.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_APP_START_COMMAND;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		load_req_64bit.img_len = load_req.img_len;
+		strlcpy(load_req_64bit.app_name, appname, MAX_APP_NAME_SIZE);
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle,
+				img_data, fw_size,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clk_vote;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd FAILED\n");
+		else
+			*app_id = resp.data;
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response QSEOS_RESULT_FAILURE\n");
+		break;
+	default:
+		pr_err("scm call returned unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	__qseecom_free_img_data(&ihandle);
+	return ret;
+}
+
+static int qseecom_load_commonlib_image(struct qseecom_dev_handle *data,
+					char *cmnlib_name)
+{
+	int ret = 0;
+	uint32_t fw_size = 0;
+	struct qseecom_load_app_ireq load_req = {0, 0, 0, 0};
+	struct qseecom_load_app_64bit_ireq load_req_64bit = {0, 0, 0, 0};
+	struct qseecom_command_scm_resp resp;
+	u8 *img_data = NULL;
+	ion_phys_addr_t pa = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	uint32_t app_arch = 0;
+
+	if (!cmnlib_name) {
+		pr_err("cmnlib_name is NULL\n");
+		return -EINVAL;
+	}
+	if (strlen(cmnlib_name) >= MAX_APP_NAME_SIZE) {
+		pr_err("The cmnlib_name (%s) with length %zu is not valid\n",
+			cmnlib_name, strlen(cmnlib_name));
+		return -EINVAL;
+	}
+
+	if (__qseecom_get_fw_size(cmnlib_name, &fw_size, &app_arch))
+		return -EIO;
+
+	ret = __qseecom_allocate_img_data(&qseecom.cmnlib_ion_handle,
+						&img_data, fw_size, &pa);
+	if (ret)
+		return -EIO;
+
+	ret = __qseecom_get_fw_data(cmnlib_name, img_data, fw_size, &load_req);
+	if (ret) {
+		ret = -EIO;
+		goto exit_free_img_data;
+	}
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.phy_addr = (uint32_t)pa;
+		load_req.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_lib_image_ireq);
+	} else {
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_SERV_IMAGE_COMMAND;
+		load_req_64bit.img_len = load_req.img_len;
+		load_req_64bit.mdt_len = load_req.mdt_len;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_lib_image_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_free_img_data;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_unregister_bus_bw_need;
+	}
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, qseecom.cmnlib_ion_handle,
+				img_data, fw_size,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clk_vote;
+	}
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+							&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n", ret);
+		ret = -EIO;
+		goto exit_disable_clk_vote;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm call failed with response result %d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	case  QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd failed err: %d\n", ret);
+			goto exit_disable_clk_vote;
+		}
+		break;
+	default:
+		pr_err("scm call returned unknown response %d\n", resp.result);
+		ret = -EINVAL;
+		goto exit_disable_clk_vote;
+	}
+
+exit_disable_clk_vote:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_unregister_bus_bw_need:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+	}
+
+exit_free_img_data:
+	__qseecom_free_img_data(&qseecom.cmnlib_ion_handle);
+	return ret;
+}
+
+static int qseecom_unload_commonlib_image(void)
+{
+	int ret = -EINVAL;
+	struct qseecom_unload_lib_image_ireq unload_req = {0};
+	struct qseecom_command_scm_resp resp;
+
+	/* Populate the remaining parameters */
+	unload_req.qsee_cmd_id = QSEOS_UNLOAD_SERV_IMAGE_COMMAND;
+
+	/* SCM_CALL to load the image */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &unload_req,
+			sizeof(struct qseecom_unload_lib_image_ireq),
+						&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload lib failed : ret %d\n", ret);
+		ret = -EIO;
+	} else {
+		switch (resp.result) {
+		case QSEOS_RESULT_SUCCESS:
+			break;
+		case QSEOS_RESULT_FAILURE:
+			pr_err("scm fail resp.result QSEOS_RESULT FAILURE\n");
+			break;
+		default:
+			pr_err("scm call returned unknown response %d\n",
+					resp.result);
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+
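+/*
+ * Illustrative kernel-client usage sketch (not part of this driver; the app
+ * name "sampleapp" and the buffer carving below are assumptions).  The
+ * exported trio qseecom_start_app()/qseecom_send_command()/
+ * qseecom_shutdown_app() is typically paired up with both command and
+ * response buffers carved out of handle->sbuf:
+ *
+ *	struct qseecom_handle *handle = NULL;
+ *	void *cmd, *rsp;
+ *	int ret;
+ *
+ *	ret = qseecom_start_app(&handle, "sampleapp", SZ_4K);
+ *	if (ret)
+ *		return ret;
+ *	cmd = handle->sbuf;
+ *	rsp = handle->sbuf + SZ_1K;
+ *	ret = qseecom_send_command(handle, cmd, SZ_1K, rsp, SZ_1K);
+ *	...
+ *	qseecom_shutdown_app(&handle);
+ */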
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size)
+{
+	int32_t ret = 0;
+	unsigned long flags = 0;
+	struct qseecom_dev_handle *data = NULL;
+	struct qseecom_check_app_ireq app_ireq;
+	struct qseecom_registered_app_list *entry = NULL;
+	struct qseecom_registered_kclient_list *kclient_entry = NULL;
+	bool found_app = false;
+	size_t len;
+	ion_phys_addr_t pa;
+	uint32_t fw_size, app_arch;
+	uint32_t app_id = 0;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+	if (!app_name) {
+		pr_err("failed to get the app name\n");
+		return -EINVAL;
+	}
+
+	if (strlen(app_name) >= MAX_APP_NAME_SIZE) {
+		pr_err("The app_name (%s) with length %zu is not valid\n",
+			app_name, strlen(app_name));
+		return -EINVAL;
+	}
+
+	*handle = kzalloc(sizeof(struct qseecom_handle), GFP_KERNEL);
+	if (!(*handle))
+		return -ENOMEM;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data) {
+		kfree(*handle);
+		*handle = NULL;
+		return -ENOMEM;
+	}
+	data->abort = 0;
+	data->type = QSEECOM_CLIENT_APP;
+	data->released = false;
+	data->client.sb_length = size;
+	data->client.user_virt_sb_base = 0;
+	data->client.ihandle = NULL;
+
+	init_waitqueue_head(&data->abort_wq);
+
+	data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
+				ION_HEAP(ION_QSECOM_HEAP_ID), 0);
+	if (IS_ERR_OR_NULL(data->client.ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		kfree(data);
+		kfree(*handle);
+		*handle = NULL;
+		return -EINVAL;
+	}
+	mutex_lock(&app_access_lock);
+
+	app_ireq.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	strlcpy(app_ireq.app_name, app_name, MAX_APP_NAME_SIZE);
+	ret = __qseecom_check_app_exists(app_ireq, &app_id);
+	if (ret)
+		goto err;
+
+	strlcpy(data->client.app_name, app_name, MAX_APP_NAME_SIZE);
+	if (app_id) {
+		pr_warn("App id %d for app [%s] already exists\n", app_id,
+			(char *)app_ireq.app_name);
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		if (!found_app)
+			pr_warn("App_id %d [%s] was loaded but not registered\n",
+					app_id, (char *)app_ireq.app_name);
+	} else {
+		/* load the app and get the app_id  */
+		pr_debug("%s: Loading app for the first time\n",
+				qseecom.pdev->init_name);
+		ret = __qseecom_load_fw(data, app_name, &app_id);
+		if (ret < 0)
+			goto err;
+	}
+	data->client.app_id = app_id;
+	if (!found_app) {
+		entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+		if (!entry) {
+			pr_err("kmalloc for app entry failed\n");
+			ret =  -ENOMEM;
+			goto err;
+		}
+		entry->app_id = app_id;
+		entry->ref_cnt = 1;
+		strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
+		if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
+			ret = -EIO;
+			kfree(entry);
+			goto err;
+		}
+		entry->app_arch = app_arch;
+		entry->app_blocked = false;
+		entry->blocked_on_listener_id = 0;
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_add_tail(&entry->list, &qseecom.registered_app_list_head);
+		spin_unlock_irqrestore(&qseecom.registered_app_list_lock,
+									flags);
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, data->client.ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto err;
+	}
+
+	/* Populate the structure for sending scm call to load image */
+	data->client.sb_virt = (char *) ion_map_kernel(qseecom.ion_clnt,
+							data->client.ihandle);
+	if (IS_ERR_OR_NULL(data->client.sb_virt)) {
+		pr_err("ION memory mapping for client shared buf failed\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	data->client.user_virt_sb_base = (uintptr_t)data->client.sb_virt;
+	data->client.sb_phys = (phys_addr_t)pa;
+	(*handle)->dev = (void *)data;
+	(*handle)->sbuf = (unsigned char *)data->client.sb_virt;
+	(*handle)->sbuf_len = data->client.sb_length;
+
+	kclient_entry = kzalloc(sizeof(*kclient_entry), GFP_KERNEL);
+	if (!kclient_entry) {
+		ret = -ENOMEM;
+		goto err;
+	}
+	kclient_entry->handle = *handle;
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_add_tail(&kclient_entry->list,
+			&qseecom.registered_kclient_list_head);
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	mutex_unlock(&app_access_lock);
+	return 0;
+
+err:
+	kfree(data);
+	kfree(*handle);
+	*handle = NULL;
+	mutex_unlock(&app_access_lock);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_start_app);
+
+int qseecom_shutdown_app(struct qseecom_handle **handle)
+{
+	int ret = -EINVAL;
+	struct qseecom_dev_handle *data;
+
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	unsigned long flags = 0;
+	bool found_handle = false;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if ((handle == NULL)  || (*handle == NULL)) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data =	(struct qseecom_dev_handle *) ((*handle)->dev);
+	mutex_lock(&app_access_lock);
+
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+	list_for_each_entry(kclient, &qseecom.registered_kclient_list_head,
+				list) {
+		if (kclient->handle == (*handle)) {
+			list_del(&kclient->list);
+			found_handle = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+	if (!found_handle)
+		pr_err("Unable to find the handle, exiting\n");
+	else
+		ret = qseecom_unload_app(data, false);
+
+	mutex_unlock(&app_access_lock);
+	if (ret == 0) {
+		kzfree(data);
+		kzfree(*handle);
+		kzfree(kclient);
+		*handle = NULL;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_shutdown_app);
+
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len)
+{
+	int ret = 0;
+	struct qseecom_send_cmd_req req = {0, 0, 0, 0};
+	struct qseecom_dev_handle *data;
+	bool perf_enabled = false;
+
+	if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
+		pr_err("Not allowed to be called in %d state\n",
+				atomic_read(&qseecom.qseecom_state));
+		return -EPERM;
+	}
+
+	if (handle == NULL) {
+		pr_err("Handle is not initialized\n");
+		return -EINVAL;
+	}
+	data = handle->dev;
+
+	req.cmd_req_len = sbuf_len;
+	req.resp_len = rbuf_len;
+	req.cmd_req_buf = send_buf;
+	req.resp_buf = resp_buf;
+
+	if (__validate_send_cmd_inputs(data, &req))
+		return -EINVAL;
+
+	mutex_lock(&app_access_lock);
+	if (qseecom.support_bus_scaling) {
+		ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+		if (ret) {
+			pr_err("Failed to set bw.\n");
+			mutex_unlock(&app_access_lock);
+			return ret;
+		}
+	}
+	/*
+	 * On targets where crypto clock is handled by HLOS,
+	 * if clk_access_cnt is zero and perf_enabled is false,
+	 * then the crypto clock was not enabled before sending cmd
+	 * to tz, qseecom will enable the clock to avoid service failure.
+	 */
+	if (!qseecom.no_clock_support &&
+		!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+		pr_debug("ce clock is not enabled!\n");
+		ret = qseecom_perf_enable(data);
+		if (ret) {
+			pr_err("Failed to vote for clock with err %d\n",
+						ret);
+			mutex_unlock(&app_access_lock);
+			return -EINVAL;
+		}
+		perf_enabled = true;
+	}
+	if (!strcmp(data->client.app_name, "securemm"))
+		data->use_legacy_cmd = true;
+
+	ret = __qseecom_send_cmd(data, &req);
+	data->use_legacy_cmd = false;
+	if (qseecom.support_bus_scaling)
+		__qseecom_add_bw_scale_down_timer(
+			QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+
+	if (perf_enabled) {
+		qsee_disable_clock_vote(data, CLK_DFAB);
+		qsee_disable_clock_vote(data, CLK_SFPB);
+	}
+
+	mutex_unlock(&app_access_lock);
+
+	if (ret)
+		return ret;
+
+	pr_debug("sending cmd_req->rsp size: %u, ptr: 0x%pK\n",
+			req.resp_len, req.resp_buf);
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_send_command);
+
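+/*
+ * Kernel-client knob for crypto bandwidth: high == true either registers a
+ * HIGH bus-bandwidth request (bus-scaling targets) or directly votes the
+ * DFAB/SFPB clocks via qseecom_perf_enable(); high == false undoes the
+ * matching vote.  A typical pairing around a burst of traffic (cmd/rsp
+ * buffers as in the qseecom_start_app() sketch above) looks like:
+ *
+ *	qseecom_set_bandwidth(handle, true);
+ *	ret = qseecom_send_command(handle, cmd, cmd_len, rsp, rsp_len);
+ *	qseecom_set_bandwidth(handle, false);
+ */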
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high)
+{
+	int ret = 0;
+
+	if ((handle == NULL) || (handle->dev == NULL)) {
+		pr_err("No valid kernel client\n");
+		return -EINVAL;
+	}
+	if (high) {
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(handle->dev,
+									HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(handle->dev);
+			if (ret)
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+		}
+	} else {
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(handle->dev, CLK_DFAB);
+			qsee_disable_clock_vote(handle->dev, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(handle->dev);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_set_bandwidth);
+
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc)
+{
+	struct qseecom_registered_app_list dummy_app_entry = { {0} };
+	struct qseecom_dev_handle dummy_private_data = {0};
+	struct qseecom_command_scm_resp resp;
+	int ret = 0;
+
+	if (!desc) {
+		pr_err("desc is NULL\n");
+		return -EINVAL;
+	}
+
+	resp.result = desc->ret[0];	/* req_cmd */
+	resp.resp_type = desc->ret[1];	/* app_id */
+	resp.data = desc->ret[2];	/* listener_id */
+
+	dummy_private_data.client.app_id = desc->ret[1];
+	dummy_app_entry.app_id = desc->ret[1];
+
+	mutex_lock(&app_access_lock);
+	ret = __qseecom_process_reentrancy(&resp, &dummy_app_entry,
+					&dummy_private_data);
+	mutex_unlock(&app_access_lock);
+	if (ret)
+		pr_err("Failed to req cmd %d lsnr %d on app %d, ret = %d\n",
+			(int)desc->ret[0], (int)desc->ret[2],
+			(int)desc->ret[1], ret);
+	desc->ret[0] = resp.result;
+	desc->ret[1] = resp.resp_type;
+	desc->ret[2] = resp.data;
+	return ret;
+}
+EXPORT_SYMBOL(qseecom_process_listener_from_smcinvoke);
+
+static int qseecom_send_resp(void)
+{
+	qseecom.send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_reentrancy_send_resp(struct qseecom_dev_handle *data)
+{
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	pr_debug("lstnr %d send resp, wakeup\n", data->listener.id);
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
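+/*
+ * Note the check ordering below: the two integer-overflow checks run before
+ * the range comparison so that (resp_buf_ptr + resp_len) cannot wrap and
+ * falsely land back inside [user_virt_sb_base, user_virt_sb_base +
+ * sb_length).  E.g. with a 32-bit uintptr_t, resp_buf_ptr == 0xfffff000 and
+ * resp_len == 0x2000 would wrap to 0x1000 without the guard.
+ */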
+static int __validate_send_modfd_resp_inputs(struct qseecom_dev_handle *data,
+			struct qseecom_send_modfd_listener_resp *resp,
+			struct qseecom_registered_listener_list *this_lstnr)
+{
+	int i;
+
+	if (!data || !resp || !this_lstnr) {
+		pr_err("listener handle or resp msg is null\n");
+		return -EINVAL;
+	}
+
+	if (resp->resp_buf_ptr == NULL) {
+		pr_err("resp buffer is null\n");
+		return -EINVAL;
+	}
+	/* validate resp buf length */
+	if ((resp->resp_len == 0) ||
+			(resp->resp_len > this_lstnr->sb_length)) {
+		pr_err("resp buf length %d not valid\n", resp->resp_len);
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)resp->resp_buf_ptr > (ULONG_MAX - resp->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_buf\n");
+		return -EINVAL;
+	}
+	if ((uintptr_t)this_lstnr->user_virt_sb_base >
+					(ULONG_MAX - this_lstnr->sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	/* validate resp buf */
+	if (((uintptr_t)resp->resp_buf_ptr <
+		(uintptr_t)this_lstnr->user_virt_sb_base) ||
+		((uintptr_t)resp->resp_buf_ptr >=
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+				this_lstnr->sb_length)) ||
+		(((uintptr_t)resp->resp_buf_ptr + resp->resp_len) >
+		((uintptr_t)this_lstnr->user_virt_sb_base +
+						this_lstnr->sb_length))) {
+		pr_err("resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (resp->ifd_data[i].cmd_buf_offset >= resp->resp_len) {
+			pr_err("Invalid offset %d = 0x%x\n",
+				i, resp->ifd_data[i].cmd_buf_offset);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+static int __qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+				void __user *argp, bool is_64bit_addr)
+{
+	struct qseecom_send_modfd_listener_resp resp;
+	struct qseecom_registered_listener_list *this_lstnr = NULL;
+
+	if (copy_from_user(&resp, argp, sizeof(resp))) {
+		pr_err("copy_from_user failed\n");
+		return -EINVAL;
+	}
+
+	this_lstnr = __qseecom_find_svc(data->listener.id);
+	if (this_lstnr == NULL)
+		return -EINVAL;
+
+	if (__validate_send_modfd_resp_inputs(data, &resp, this_lstnr))
+		return -EINVAL;
+
+	resp.resp_buf_ptr = this_lstnr->sb_virt +
+		(uintptr_t)(resp.resp_buf_ptr - this_lstnr->user_virt_sb_base);
+
+	if (!is_64bit_addr)
+		__qseecom_update_cmd_buf(&resp, false, data);
+	else
+		__qseecom_update_cmd_buf_64(&resp, false, data);
+	qseecom.send_resp_flag = 1;
+	this_lstnr->send_resp_flag = 1;
+	wake_up_interruptible(&qseecom.send_resp_wq);
+	return 0;
+}
+
+static int qseecom_send_modfd_resp(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, false);
+}
+
+static int qseecom_send_modfd_resp_64(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	return __qseecom_send_modfd_resp(data, argp, true);
+}
+
+static int qseecom_get_qseos_version(struct qseecom_dev_handle *data,
+						void __user *argp)
+{
+	struct qseecom_qseos_version_req req;
+
+	if (copy_from_user(&req, argp, sizeof(req))) {
+		pr_err("copy_from_user failed\n");
+		return -EINVAL;
+	}
+	req.qseos_version = qseecom.qseos_version;
+	if (copy_to_user(argp, &req, sizeof(req))) {
+		pr_err("copy_to_user failed\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
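+/*
+ * Refcounted enable for a CE clock bundle.  The first caller prepares and
+ * enables core -> iface -> bus in that order; failures unwind in reverse so
+ * no clock is left prepared.  Subsequent callers only bump clk_access_cnt,
+ * and __qseecom_disable_clk() below tears the bundle down again when the
+ * count drops back to zero.
+ */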
+static int __qseecom_enable_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct qseecom_clk *qclk = NULL;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	if (ce == CLK_CE_DRV)
+		qclk = &qseecom.ce_drv;
+
+	if (qclk == NULL) {
+		pr_err("CLK type not supported\n");
+		return -EINVAL;
+	}
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == ULONG_MAX) {
+		pr_err("clk_access_cnt beyond limitation\n");
+		goto err;
+	}
+	if (qclk->clk_access_cnt > 0) {
+		qclk->clk_access_cnt++;
+		mutex_unlock(&clk_access_lock);
+		return rc;
+	}
+
+	/* Enable CE core clk */
+	if (qclk->ce_core_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_core_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE core clk\n");
+			goto err;
+		}
+	}
+	/* Enable CE clk */
+	if (qclk->ce_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE iface clk\n");
+			goto ce_clk_err;
+		}
+	}
+	/* Enable AXI clk */
+	if (qclk->ce_bus_clk != NULL) {
+		rc = clk_prepare_enable(qclk->ce_bus_clk);
+		if (rc) {
+			pr_err("Unable to enable/prepare CE bus clk\n");
+			goto ce_bus_clk_err;
+		}
+	}
+	qclk->clk_access_cnt++;
+	mutex_unlock(&clk_access_lock);
+	return 0;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk != NULL)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk != NULL)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	return -EIO;
+}
+
+static void __qseecom_disable_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	mutex_lock(&clk_access_lock);
+
+	if (qclk->clk_access_cnt == 0) {
+		mutex_unlock(&clk_access_lock);
+		return;
+	}
+
+	if (qclk->clk_access_cnt == 1) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+	qclk->clk_access_cnt--;
+	mutex_unlock(&clk_access_lock);
+}
+
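+/*
+ * The msm_bus_scale vote index used below apparently encodes which of the
+ * two clients are active (inferred from the update_request calls, not from
+ * a documented contract): 0 = neither, 1 = DFAB only, 2 = SFPB only,
+ * 3 = both.  Enabling one vote while the other is already held therefore
+ * moves straight to index 3, and releasing one drops back to the index of
+ * the survivor.
+ */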
+static int qsee_vote_for_clock(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	if (qseecom.no_clock_support)
+		return 0;
+
+	qclk = &qseecom.qsee;
+	if (!qseecom.qsee_perf_client)
+		return ret;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_bw_count) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count++;
+				data->perf_enabled = true;
+			}
+		} else {
+			qseecom.qsee_bw_count++;
+			data->perf_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (!qseecom.qsee_sfpb_bw_count) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 3);
+			else {
+				if (qclk->ce_core_src_clk != NULL)
+					ret = __qseecom_enable_clk(CLK_QSEE);
+				if (!ret) {
+					ret =
+					msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 2);
+					if ((ret) &&
+						(qclk->ce_core_src_clk != NULL))
+						__qseecom_disable_clk(CLK_QSEE);
+				}
+			}
+
+			if (ret)
+				pr_err("SFPB Bandwidth req failed (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count++;
+				data->fast_load_enabled = true;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count++;
+			data->fast_load_enabled = true;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+	return ret;
+}
+
+static void qsee_disable_clock_vote(struct qseecom_dev_handle *data,
+						int32_t clk_type)
+{
+	int32_t ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+
+	if (qseecom.no_clock_support)
+		return;
+	if (!qseecom.qsee_perf_client)
+		return;
+
+	switch (clk_type) {
+	case CLK_DFAB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_bw_count == 0) {
+			pr_err("Client error. Extra call to disable DFAB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+
+		if (qseecom.qsee_bw_count == 1) {
+			if (qseecom.qsee_sfpb_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+					qseecom.qsee_perf_client, 2);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("DFAB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_bw_count--;
+				data->perf_enabled = false;
+			}
+		} else {
+			qseecom.qsee_bw_count--;
+			data->perf_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	case CLK_SFPB:
+		mutex_lock(&qsee_bw_mutex);
+		if (qseecom.qsee_sfpb_bw_count == 0) {
+			pr_err("Client error. Extra call to disable SFPB clk\n");
+			mutex_unlock(&qsee_bw_mutex);
+			return;
+		}
+		if (qseecom.qsee_sfpb_bw_count == 1) {
+			if (qseecom.qsee_bw_count > 0)
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 1);
+			else {
+				ret = msm_bus_scale_client_update_request(
+						qseecom.qsee_perf_client, 0);
+				if ((!ret) && (qclk->ce_core_src_clk != NULL))
+					__qseecom_disable_clk(CLK_QSEE);
+			}
+			if (ret)
+				pr_err("SFPB Bandwidth req fail (%d)\n",
+								ret);
+			else {
+				qseecom.qsee_sfpb_bw_count--;
+				data->fast_load_enabled = false;
+			}
+		} else {
+			qseecom.qsee_sfpb_bw_count--;
+			data->fast_load_enabled = false;
+		}
+		mutex_unlock(&qsee_bw_mutex);
+		break;
+	default:
+		pr_err("Clock type not defined\n");
+		break;
+	}
+
+}
+
+static int qseecom_load_external_elf(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct ion_handle *ihandle;	/* Ion handle */
+	struct qseecom_load_img_req load_img_req;
+	int uret = 0;
+	int ret;
+	ion_phys_addr_t pa = 0;
+	size_t len;
+	struct qseecom_load_app_ireq load_req;
+	struct qseecom_load_app_64bit_ireq load_req_64bit;
+	struct qseecom_command_scm_resp resp;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&load_img_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_load_img_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	/* Get the handle of the shared fd */
+	ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+				load_img_req.ifd_data_fd);
+	if (IS_ERR_OR_NULL(ihandle)) {
+		pr_err("Ion client could not retrieve the handle\n");
+		return -ENOMEM;
+	}
+
+	/* Get the physical address of the ION BUF */
+	ret = ion_phys(qseecom.ion_clnt, ihandle, &pa, &len);
+	if (ret) {
+		pr_err("Cannot get phys_addr for the Ion Client, ret = %d\n",
+			ret);
+		goto exit_cpu_restore;
+	}
+	if (load_img_req.mdt_len > len || load_img_req.img_len > len) {
+		pr_err("ion len %zu is smaller than mdt_len %u or img_len %u\n",
+				len, load_img_req.mdt_len,
+				load_img_req.img_len);
+		ret = -EINVAL;
+		goto exit_cpu_restore;
+	}
+	/* Populate the structure for sending scm call to load image */
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		load_req.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req.mdt_len = load_img_req.mdt_len;
+		load_req.img_len = load_img_req.img_len;
+		load_req.phy_addr = (uint32_t)pa;
+		cmd_buf = (void *)&load_req;
+		cmd_len = sizeof(struct qseecom_load_app_ireq);
+	} else {
+		load_req_64bit.qsee_cmd_id = QSEOS_LOAD_EXTERNAL_ELF_COMMAND;
+		load_req_64bit.mdt_len = load_img_req.mdt_len;
+		load_req_64bit.img_len = load_img_req.img_len;
+		load_req_64bit.phy_addr = (uint64_t)pa;
+		cmd_buf = (void *)&load_req_64bit;
+		cmd_len = sizeof(struct qseecom_load_app_64bit_ireq);
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		ret = __qseecom_register_bus_bandwidth_needs(data, MEDIUM);
+		mutex_unlock(&qsee_bw_mutex);
+		if (ret) {
+			ret = -EIO;
+			goto exit_cpu_restore;
+		}
+	}
+
+	/* Vote for the SFPB clock */
+	ret = __qseecom_enable_clk_scale_up(data);
+	if (ret) {
+		ret = -EIO;
+		goto exit_register_bus_bandwidth_needs;
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, ihandle, NULL, len,
+				ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		goto exit_disable_clock;
+	}
+	/*  SCM_CALL to load the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, cmd_buf, cmd_len,
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to load failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto exit_disable_clock;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		pr_err("%s: qseos result incomplete\n", __func__);
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd failed: err: %d\n", ret);
+		break;
+	case QSEOS_RESULT_FAILURE:
+		pr_err("scm_call rsp.result is QSEOS_RESULT_FAILURE\n");
+		ret = -EFAULT;
+		break;
+	default:
+		pr_err("scm_call response result %d not supported\n",
+							resp.result);
+		ret = -EFAULT;
+		break;
+	}
+
+exit_disable_clock:
+	__qseecom_disable_clk_scale_down(data);
+
+exit_register_bus_bandwidth_needs:
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		uret = qseecom_unregister_bus_bandwidth_needs(data);
+		mutex_unlock(&qsee_bw_mutex);
+		if (uret)
+			pr_err("Failed to unregister bus bw needs %d, scm_call ret %d\n",
+								uret, ret);
+	}
+
+exit_cpu_restore:
+	/* Deallocate the handle */
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return ret;
+}
+
+static int qseecom_unload_external_elf(struct qseecom_dev_handle *data)
+{
+	int ret = 0;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_unload_app_ireq req;
+
+	/* unavailable client app */
+	data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+
+	/* Populate the structure for sending scm call to unload image */
+	req.qsee_cmd_id = QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND;
+
+	/* SCM_CALL to unload the external elf */
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1, &req,
+			sizeof(struct qseecom_unload_app_ireq),
+			&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call to unload failed : ret %d\n",
+				ret);
+		ret = -EFAULT;
+		goto qseecom_unload_external_elf_scm_err;
+	}
+	if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret)
+			pr_err("process_incomplete_cmd fail err: %d\n",
+					ret);
+	} else {
+		if (resp.result != QSEOS_RESULT_SUCCESS) {
+			pr_err("scm_call to unload image failed resp.result =%d\n",
+						resp.result);
+			ret = -EFAULT;
+		}
+	}
+
+qseecom_unload_external_elf_scm_err:
+
+	return ret;
+}
+
+static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
+					void __user *argp)
+{
+	int32_t ret;
+	struct qseecom_qseos_app_load_query query_req;
+	struct qseecom_check_app_ireq req;
+	struct qseecom_registered_app_list *entry = NULL;
+	unsigned long flags = 0;
+	uint32_t app_arch = 0, app_id = 0;
+	bool found_app = false;
+
+	/* Copy the relevant information needed for loading the image */
+	if (copy_from_user(&query_req,
+				(void __user *)argp,
+				sizeof(struct qseecom_qseos_app_load_query))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	req.qsee_cmd_id = QSEOS_APP_LOOKUP_COMMAND;
+	query_req.app_name[MAX_APP_NAME_SIZE-1] = '\0';
+	strlcpy(req.app_name, query_req.app_name, MAX_APP_NAME_SIZE);
+
+	ret = __qseecom_check_app_exists(req, &app_id);
+	if (ret) {
+		pr_err("scm call to check if app is loaded failed\n");
+		return ret;	/* scm call failed */
+	}
+	if (app_id) {
+		pr_debug("App id %d (%s) already exists\n", app_id,
+			(char *)(req.app_name));
+		spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+		list_for_each_entry(entry,
+				&qseecom.registered_app_list_head, list){
+			if (entry->app_id == app_id) {
+				app_arch = entry->app_arch;
+				entry->ref_cnt++;
+				found_app = true;
+				break;
+			}
+		}
+		spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		data->client.app_id = app_id;
+		query_req.app_id = app_id;
+		if (app_arch) {
+			data->client.app_arch = app_arch;
+			query_req.app_arch = app_arch;
+		} else {
+			data->client.app_arch = 0;
+			query_req.app_arch = 0;
+		}
+		strlcpy(data->client.app_name, query_req.app_name,
+				MAX_APP_NAME_SIZE);
+		/*
+		 * If app was loaded by appsbl before and was not registered,
+		 * register this app now.
+		 */
+		if (!found_app) {
+			pr_debug("Register app %d [%s] which was loaded before\n",
+					app_id, (char *)query_req.app_name);
+			entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+			if (!entry) {
+				pr_err("kmalloc for app entry failed\n");
+				return  -ENOMEM;
+			}
+			entry->app_id = app_id;
+			entry->ref_cnt = 1;
+			entry->app_arch = data->client.app_arch;
+			strlcpy(entry->app_name, data->client.app_name,
+				MAX_APP_NAME_SIZE);
+			entry->app_blocked = false;
+			entry->blocked_on_listener_id = 0;
+			spin_lock_irqsave(&qseecom.registered_app_list_lock,
+				flags);
+			list_add_tail(&entry->list,
+				&qseecom.registered_app_list_head);
+			spin_unlock_irqrestore(
+				&qseecom.registered_app_list_lock, flags);
+		}
+		if (copy_to_user(argp, &query_req, sizeof(query_req))) {
+			pr_err("copy_to_user failed\n");
+			return -EFAULT;
+		}
+		return -EEXIST;	/* app already loaded */
+	} else {
+		return 0;	/* app not loaded */
+	}
+}
+
+static int __qseecom_get_ce_pipe_info(
+			enum qseecom_key_management_usage_type usage,
+			uint32_t *pipe, uint32_t **ce_hw, uint32_t unit)
+{
+	int ret = -EINVAL;
+	int i, j;
+	struct qseecom_ce_info_use *p = NULL;
+	int total = 0;
+	struct qseecom_ce_pipe_entry *pcepipe;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	for (j = 0; j < total; j++) {
+		if (p->unit_num == unit) {
+			pcepipe =  p->ce_pipe_entry;
+			for (i = 0; i < p->num_ce_pipe_entries; i++) {
+				(*ce_hw)[i] = pcepipe->ce_num;
+				*pipe = pcepipe->ce_pipe_pair;
+				pcepipe++;
+			}
+			ret = 0;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
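+/*
+ * The key-management helpers below (generate/delete/set/update) share one
+ * shape: vote the QSEE clocks, issue a single scm call with a usage-specific
+ * ireq, then fold the QSEOS result codes into errnos (KEY_ID_EXISTS is
+ * treated as success, MAX_ATTEMPT becomes -ERANGE, and PENDING_OPERATION is
+ * passed through so callers can retry), and finally drop the clock vote.
+ */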
+static int __qseecom_generate_and_save_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_generate_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_generate_ireq),
+				&resp, sizeof(resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+			pr_debug("Key ID exists.\n");
+			ret = 0;
+		} else {
+			pr_err("scm call to generate key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto generate_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_FAIL_KEY_ID_EXISTS:
+		pr_debug("Key ID exists.\n");
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			if (resp.result == QSEOS_RESULT_FAIL_KEY_ID_EXISTS) {
+				pr_debug("Key ID exists.\n");
+				ret = 0;
+			} else {
+				pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("gen key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+generate_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_delete_saved_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_delete_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_delete_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else {
+			pr_err("scm call to delete key failed : %d\n", ret);
+			ret = -EFAULT;
+		}
+		goto del_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Delete key scm call failed resp.result %d\n",
+							resp.result);
+		ret = -EINVAL;
+		break;
+	}
+del_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
+static int __qseecom_set_clear_ce_key(struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_select_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance) {
+		ret = __qseecom_enable_clk(CLK_CE_DRV);
+		if (ret)
+			return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				ireq, sizeof(struct qseecom_key_select_ireq),
+				&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+			pr_debug("Max attempts to input password reached.\n");
+			ret = -ERANGE;
+		} else if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to set QSEOS_PIPE_ENC key failed : %d\n",
+				ret);
+			ret = -EFAULT;
+		}
+		goto set_key_exit;
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result ==
+				QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+				pr_debug("Set Key operation under processing...\n");
+				ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+			}
+			if (resp.result == QSEOS_RESULT_FAIL_MAX_ATTEMPT) {
+				pr_debug("Max attempts to input password reached.\n");
+				ret = -ERANGE;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_MAX_ATTEMPT:
+		pr_debug("Max attempts to input password reached.\n");
+		ret = -ERANGE;
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Set Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+set_key_exit:
+	__qseecom_disable_clk(CLK_QSEE);
+	if (qseecom.qsee.instance != qseecom.ce_drv.instance)
+		__qseecom_disable_clk(CLK_CE_DRV);
+	return ret;
+}
+
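+/*
+ * Ask TZ to rebind the saved key to new user info (a changed password
+ * hash). Shares the retry contract of __qseecom_set_clear_ce_key():
+ * a QSEOS_RESULT_FAIL_PENDING_OPERATION return means TZ is still busy
+ * with a previous key operation and the caller should retry.
+ */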
+static int __qseecom_update_current_key_user_info(
+			struct qseecom_dev_handle *data,
+			enum qseecom_key_management_usage_type usage,
+			struct qseecom_key_userinfo_update_ireq *ireq)
+{
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+				usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", usage);
+		return -EFAULT;
+	}
+	ret = __qseecom_enable_clk(CLK_QSEE);
+	if (ret)
+		return ret;
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+		ireq, sizeof(struct qseecom_key_userinfo_update_ireq),
+		&resp, sizeof(struct qseecom_command_scm_resp));
+	if (ret) {
+		if (ret == -EINVAL &&
+			resp.result == QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+			pr_debug("Set Key operation under processing...\n");
+			ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		} else {
+			pr_err("scm call to update key userinfo failed: %d\n",
+									ret);
+			__qseecom_disable_clk(CLK_QSEE);
+			return -EFAULT;
+		}
+	}
+
+	switch (resp.result) {
+	case QSEOS_RESULT_SUCCESS:
+		break;
+	case QSEOS_RESULT_INCOMPLETE:
+		ret = __qseecom_process_incomplete_cmd(data, &resp);
+		if (ret) {
+			pr_err("process_incomplete_cmd FAILED, resp.result %d\n",
+					resp.result);
+			if (resp.result ==
+				QSEOS_RESULT_FAIL_PENDING_OPERATION) {
+				pr_debug("Update Key operation under processing...\n");
+				ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+			}
+		}
+		break;
+	case QSEOS_RESULT_FAIL_PENDING_OPERATION:
+		pr_debug("Update Key operation under processing...\n");
+		ret = QSEOS_RESULT_FAIL_PENDING_OPERATION;
+		break;
+	case QSEOS_RESULT_FAILURE:
+	default:
+		pr_err("Set key scm call failed resp.result %d\n", resp.result);
+		ret = -EINVAL;
+		break;
+	}
+
+	__qseecom_disable_clk(CLK_QSEE);
+	return ret;
+}
+
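+/*
+ * Enable/disable helpers for the Inline Crypto Engine path. Only the
+ * UFS and SDCC ICE usages touch the HW; any other usage is a no-op
+ * that returns 0, so GPCE-based targets pass through unaffected.
+ */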
+static int qseecom_enable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", true);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", true);
+
+	return ret;
+}
+
+static int qseecom_disable_ice_setup(int usage)
+{
+	int ret = 0;
+
+	if (usage == QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("ufs", false);
+	else if (usage == QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION)
+		ret = qcom_ice_setup_ice_hw("sdcc", false);
+
+	return ret;
+}
+
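+/*
+ * Return the number of CE pipe entries for the given unit in the FDE
+ * or PFE info table (selected by usage), or -EINVAL if the unit is
+ * not present. Callers size their ce_hw arrays from this count.
+ */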
+static int qseecom_get_ce_hw_instance(uint32_t unit, uint32_t usage)
+{
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	int i;
+
+	switch (usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		p = qseecom.ce_info.fde;
+		total = qseecom.ce_info.num_fde;
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		p = qseecom.ce_info.pfe;
+		total = qseecom.ce_info.num_pfe;
+		break;
+	default:
+		pr_err("unsupported usage %d\n", usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+
+	for (i = 0; i < total; i++) {
+		if (p->unit_num == unit) {
+			pce_info_use = p;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use) {
+		pr_err("can not find %d\n", unit);
+		return -EINVAL;
+	}
+	return pce_info_use->num_ce_pipe_entries;
+}
+
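+/*
+ * QSEECOM_IOCTL_CREATE_KEY_REQ handler. Two stages:
+ *   1. generate and persist a key in TZ, identified by a fixed key_id
+ *      string per usage plus the caller-supplied hash32;
+ *   2. load that key into each CE pipe entry, retrying with a 50 ms
+ *      sleep while TZ reports a pending key operation.
+ * ICE usages program a fixed CE/key index instead of the probed
+ * pipe/ce_hw values.
+ */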
+static int qseecom_create_key(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int i;
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_create_key_req create_key_req;
+	struct qseecom_key_generate_ireq generate_key_ireq;
+	struct qseecom_key_select_ireq set_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&create_key_req, argp, sizeof(create_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (create_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		create_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", create_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					create_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			DEFAULT_CE_INFO_UNIT, create_key_req.usage);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	ret = __qseecom_get_ce_pipe_info(create_key_req.usage, &pipe, &ce_hw,
+			DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	generate_key_ireq.flags = flags;
+	generate_key_ireq.qsee_command_id = QSEOS_GENERATE_KEY;
+	memset((void *)generate_key_ireq.key_id,
+			0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)generate_key_ireq.hash32,
+			0, QSEECOM_HASH_SIZE);
+	memcpy((void *)generate_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)generate_key_ireq.hash32,
+			(void *)create_key_req.hash32,
+			QSEECOM_HASH_SIZE);
+
+	ret = __qseecom_generate_and_save_key(data,
+			create_key_req.usage, &generate_key_ireq);
+	if (ret) {
+		pr_err("Failed to generate key on storage: %d\n", ret);
+		goto free_buf;
+	}
+
+	for (i = 0; i < entries; i++) {
+		set_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else if (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			set_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			set_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+
+		} else {
+			set_key_ireq.ce = ce_hw[i];
+			set_key_ireq.pipe = pipe;
+		}
+		set_key_ireq.flags = flags;
+
+		/* set both PIPE_ENC and PIPE_ENC_XTS */
+		set_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		memset((void *)set_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memset((void *)set_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+		memcpy((void *)set_key_ireq.key_id,
+			(void *)key_id_array[create_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)set_key_ireq.hash32,
+				(void *)create_key_req.hash32,
+				QSEECOM_HASH_SIZE);
+		/*
+		 * It will return false if it is GPCE based crypto instance or
+		 * ICE is setup properly
+		 */
+		if (qseecom_enable_ice_setup(create_key_req.usage))
+			goto free_buf;
+
+		do {
+			ret = __qseecom_set_clear_ce_key(data,
+					create_key_req.usage,
+					&set_key_ireq);
+			/*
+			 * wait a little before calling scm again to let other
+			 * processes run
+			 */
+			if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+				msleep(50);
+
+		} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+
+		qseecom_disable_ice_setup(create_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to create key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[i], ret);
+			goto free_buf;
+		} else {
+			pr_err("Set the key successfully\n");
+			if ((create_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) ||
+			     (create_key_req.usage ==
+				QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION))
+				goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
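+/*
+ * QSEECOM_IOCTL_WIPE_KEY_REQ handler: optionally delete the persisted
+ * key (wipe_key_flag), then load QSEECOM_INVALID_KEY_ID into every CE
+ * pipe entry so no stale key material remains selectable.
+ */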
+static int qseecom_wipe_key(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	uint32_t *ce_hw = NULL;
+	uint32_t pipe = 0;
+	int ret = 0;
+	uint32_t flags = 0;
+	int i, j;
+	struct qseecom_wipe_key_req wipe_key_req;
+	struct qseecom_key_delete_ireq delete_key_ireq;
+	struct qseecom_key_select_ireq clear_key_ireq;
+	int entries = 0;
+
+	ret = copy_from_user(&wipe_key_req, argp, sizeof(wipe_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (wipe_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		wipe_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("unsupported usage %d\n", wipe_key_req.usage);
+		ret = -EFAULT;
+		return ret;
+	}
+
+	entries = qseecom_get_ce_hw_instance(DEFAULT_CE_INFO_UNIT,
+					wipe_key_req.usage);
+	if (entries <= 0) {
+		pr_err("no ce instance for usage %d instance %d\n",
+			DEFAULT_CE_INFO_UNIT, wipe_key_req.usage);
+		ret = -EINVAL;
+		return ret;
+	}
+
+	ce_hw = kcalloc(entries, sizeof(*ce_hw), GFP_KERNEL);
+	if (!ce_hw) {
+		ret = -ENOMEM;
+		return ret;
+	}
+
+	ret = __qseecom_get_ce_pipe_info(wipe_key_req.usage, &pipe, &ce_hw,
+				DEFAULT_CE_INFO_UNIT);
+	if (ret) {
+		pr_err("Failed to retrieve pipe/ce_hw info: %d\n", ret);
+		ret = -EINVAL;
+		goto free_buf;
+	}
+
+	if (wipe_key_req.wipe_key_flag) {
+		delete_key_ireq.flags = flags;
+		delete_key_ireq.qsee_command_id = QSEOS_DELETE_KEY;
+		memset((void *)delete_key_ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+		memcpy((void *)delete_key_ireq.key_id,
+			(void *)key_id_array[wipe_key_req.usage].desc,
+			QSEECOM_KEY_ID_SIZE);
+		memset((void *)delete_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		ret = __qseecom_delete_saved_key(data, wipe_key_req.usage,
+					&delete_key_ireq);
+		if (ret) {
+			pr_err("Failed to delete key from ssd storage: %d\n",
+				ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+	for (j = 0; j < entries; j++) {
+		clear_key_ireq.qsee_command_id = QSEOS_SET_KEY;
+		if (wipe_key_req.usage ==
+				QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_UFS_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else if (wipe_key_req.usage ==
+			QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION) {
+			clear_key_ireq.ce = QSEECOM_SDCC_ICE_CE_NUM;
+			clear_key_ireq.pipe = QSEECOM_ICE_FDE_KEY_INDEX;
+		} else {
+			clear_key_ireq.ce = ce_hw[j];
+			clear_key_ireq.pipe = pipe;
+		}
+		clear_key_ireq.flags = flags;
+		clear_key_ireq.pipe_type = QSEOS_PIPE_ENC|QSEOS_PIPE_ENC_XTS;
+		for (i = 0; i < QSEECOM_KEY_ID_SIZE; i++)
+			clear_key_ireq.key_id[i] = QSEECOM_INVALID_KEY_ID;
+		memset((void *)clear_key_ireq.hash32, 0, QSEECOM_HASH_SIZE);
+
+		/*
+		 * It will return false if it is GPCE based crypto instance or
+		 * ICE is setup properly
+		 */
+		if (qseecom_enable_ice_setup(wipe_key_req.usage))
+			goto free_buf;
+
+		ret = __qseecom_set_clear_ce_key(data, wipe_key_req.usage,
+					&clear_key_ireq);
+
+		qseecom_disable_ice_setup(wipe_key_req.usage);
+
+		if (ret) {
+			pr_err("Failed to wipe key: pipe %d, ce %d: %d\n",
+				pipe, ce_hw[j], ret);
+			ret = -EFAULT;
+			goto free_buf;
+		}
+	}
+
+free_buf:
+	kzfree(ce_hw);
+	return ret;
+}
+
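+/*
+ * QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ handler: rebind the stored
+ * key from current_hash32 to new_hash32, sleeping 50 ms between
+ * retries while TZ reports the operation as still pending.
+ */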
+static int qseecom_update_key_user_info(struct qseecom_dev_handle *data,
+			void __user *argp)
+{
+	int ret = 0;
+	uint32_t flags = 0;
+	struct qseecom_update_key_userinfo_req update_key_req;
+	struct qseecom_key_userinfo_update_ireq ireq;
+
+	ret = copy_from_user(&update_key_req, argp, sizeof(update_key_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	if (update_key_req.usage < QSEOS_KM_USAGE_DISK_ENCRYPTION ||
+		update_key_req.usage >= QSEOS_KM_USAGE_MAX) {
+		pr_err("Error:: unsupported usage %d\n", update_key_req.usage);
+		return -EFAULT;
+	}
+
+	ireq.qsee_command_id = QSEOS_UPDATE_KEY_USERINFO;
+
+	if (qseecom.fde_key_size)
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_32_BYTE;
+	else
+		flags |= QSEECOM_ICE_FDE_KEY_SIZE_16_BYTE;
+
+	ireq.flags = flags;
+	memset(ireq.key_id, 0, QSEECOM_KEY_ID_SIZE);
+	memset((void *)ireq.current_hash32, 0, QSEECOM_HASH_SIZE);
+	memset((void *)ireq.new_hash32, 0, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.key_id,
+		(void *)key_id_array[update_key_req.usage].desc,
+		QSEECOM_KEY_ID_SIZE);
+	memcpy((void *)ireq.current_hash32,
+		(void *)update_key_req.current_hash32, QSEECOM_HASH_SIZE);
+	memcpy((void *)ireq.new_hash32,
+		(void *)update_key_req.new_hash32, QSEECOM_HASH_SIZE);
+
+	do {
+		ret = __qseecom_update_current_key_user_info(data,
+						update_key_req.usage,
+						&ireq);
+		/*
+		 * wait a little before calling scm again to let other
+		 * processes run
+		 */
+		if (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION)
+			msleep(50);
+
+	} while (ret == QSEOS_RESULT_FAIL_PENDING_OPERATION);
+	if (ret)
+		pr_err("Failed to update key info: %d\n", ret);
+	return ret;
+}
+
+static int qseecom_is_es_activated(void __user *argp)
+{
+	struct qseecom_is_es_activated_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_IS_ACTIVATED_ID,
+		&req, sizeof(req), &resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call failed\n");
+		return ret;
+	}
+
+	req.is_activated = resp.result;
+	ret = copy_to_user(argp, &req, sizeof(req));
+	if (ret) {
+		pr_err("copy_to_user failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int qseecom_save_partition_hash(void __user *argp)
+{
+	struct qseecom_save_partition_hash_req req;
+	struct qseecom_command_scm_resp resp;
+	int ret;
+
+	memset(&resp, 0x00, sizeof(resp));
+
+	if (qseecom.qsee_version < QSEE_VERSION_04) {
+		pr_err("invalid qsee version\n");
+		return -ENODEV;
+	}
+
+	if (argp == NULL) {
+		pr_err("arg is null\n");
+		return -EINVAL;
+	}
+
+	ret = copy_from_user(&req, argp, sizeof(req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+
+	ret = qseecom_scm_call(SCM_SVC_ES, SCM_SAVE_PARTITION_HASH_ID,
+		       (void *)&req, sizeof(req), (void *)&resp, sizeof(resp));
+	if (ret) {
+		pr_err("qseecom_scm_call failed\n");
+		return ret;
+	}
+
+	return 0;
+}
+
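+/*
+ * MDTP DIP cipher request: stage the user buffers in kernel memory,
+ * flush the caches so TZ sees coherent data, and pass the physical
+ * addresses to TZ through scm_call2(). The output buffer is flushed
+ * once more before copy_to_user() because TZ writes it behind the
+ * CPU cache.
+ */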
+static int qseecom_mdtp_cipher_dip(void __user *argp)
+{
+	struct qseecom_mdtp_cipher_dip_req req;
+	u32 tzbuflenin, tzbuflenout;
+	char *tzbufin = NULL, *tzbufout = NULL;
+	struct scm_desc desc = {0};
+	int ret;
+
+	do {
+		/* Copy the parameters from userspace */
+		if (argp == NULL) {
+			pr_err("arg is null\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = copy_from_user(&req, argp, sizeof(req));
+		if (ret) {
+			pr_err("copy_from_user failed, ret= %d\n", ret);
+			break;
+		}
+
+		if (req.in_buf == NULL || req.out_buf == NULL ||
+			req.in_buf_size == 0 || req.in_buf_size > MAX_DIP ||
+			req.out_buf_size == 0 || req.out_buf_size > MAX_DIP ||
+				req.direction > 1) {
+			pr_err("invalid parameters\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		/* Copy the input buffer from userspace to kernel space */
+		tzbuflenin = PAGE_ALIGN(req.in_buf_size);
+		tzbufin = kzalloc(tzbuflenin, GFP_KERNEL);
+		if (!tzbufin) {
+			pr_err("error allocating in buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		ret = copy_from_user(tzbufin, req.in_buf, req.in_buf_size);
+		if (ret) {
+			pr_err("copy_from_user failed, ret=%d\n", ret);
+			break;
+		}
+
+		dmac_flush_range(tzbufin, tzbufin + tzbuflenin);
+
+		/* Prepare the output buffer in kernel space */
+		tzbuflenout = PAGE_ALIGN(req.out_buf_size);
+		tzbufout = kzalloc(tzbuflenout, GFP_KERNEL);
+		if (!tzbufout) {
+			pr_err("error allocating out buffer\n");
+			ret = -ENOMEM;
+			break;
+		}
+
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+
+		/* Send the command to TZ */
+		desc.arginfo = TZ_MDTP_CIPHER_DIP_ID_PARAM_ID;
+		desc.args[0] = virt_to_phys(tzbufin);
+		desc.args[1] = req.in_buf_size;
+		desc.args[2] = virt_to_phys(tzbufout);
+		desc.args[3] = req.out_buf_size;
+		desc.args[4] = req.direction;
+
+		ret = __qseecom_enable_clk(CLK_QSEE);
+		if (ret)
+			break;
+
+		ret = scm_call2(TZ_MDTP_CIPHER_DIP_ID, &desc);
+
+		__qseecom_disable_clk(CLK_QSEE);
+
+		if (ret) {
+			pr_err("scm_call2 failed for SCM_SVC_MDTP, ret=%d\n",
+				ret);
+			break;
+		}
+
+		/* Copy the output buffer from kernel space to userspace */
+		dmac_flush_range(tzbufout, tzbufout + tzbuflenout);
+		ret = copy_to_user(req.out_buf, tzbufout, req.out_buf_size);
+		if (ret) {
+			pr_err("copy_to_user failed, ret=%d\n", ret);
+			break;
+		}
+	} while (0);
+
+	kzfree(tzbufin);
+	kzfree(tzbufout);
+
+	return ret;
+}
+
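+/*
+ * Sanity-check a QTEEC request before anything touches the buffers:
+ * both pointers must be non-NULL, both lengths non-zero, every
+ * pointer + length sum must fit without wrapping, and both buffers
+ * must lie entirely inside the client's shared buffer. The rest of
+ * the QTEEC path relies on these invariants.
+ */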
+static int __qseecom_qteec_validate_msg(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req)
+{
+	if (!data || !data->client.ihandle) {
+		pr_err("Client or client handle is not initialized\n");
+		return -EINVAL;
+	}
+
+	if (data->type != QSEECOM_CLIENT_APP)
+		return -EFAULT;
+
+	if (req->req_len > UINT_MAX - req->resp_len) {
+		pr_err("Integer overflow detected in req_len & rsp_len\n");
+		return -EINVAL;
+	}
+
+	if (req->req_len + req->resp_len > data->client.sb_length) {
+		pr_debug("Not enough memory to fit cmd_buf.\n");
+		pr_debug("resp_buf. Required: %u, Available: %zu\n",
+		(req->req_len + req->resp_len), data->client.sb_length);
+		return -ENOMEM;
+	}
+
+	if (req->req_ptr == NULL || req->resp_ptr == NULL) {
+		pr_err("cmd buffer or response buffer is null\n");
+		return -EINVAL;
+	}
+	if (((uintptr_t)req->req_ptr <
+			data->client.user_virt_sb_base) ||
+		((uintptr_t)req->req_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("cmd buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if (((uintptr_t)req->resp_ptr <
+			data->client.user_virt_sb_base)  ||
+		((uintptr_t)req->resp_ptr >=
+		(data->client.user_virt_sb_base + data->client.sb_length))) {
+		pr_err("response buffer address not within shared bufffer\n");
+		return -EINVAL;
+	}
+
+	if ((req->req_len == 0) || (req->resp_len == 0)) {
+		pr_err("cmd buf lengtgh/response buf length not valid\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->req_ptr > (ULONG_MAX - req->req_len)) {
+		pr_err("Integer overflow in req_len & req_ptr\n");
+		return -EINVAL;
+	}
+
+	if ((uintptr_t)req->resp_ptr > (ULONG_MAX - req->resp_len)) {
+		pr_err("Integer overflow in resp_len & resp_ptr\n");
+		return -EINVAL;
+	}
+
+	if (data->client.user_virt_sb_base >
+					(ULONG_MAX - data->client.sb_length)) {
+		pr_err("Integer overflow in user_virt_sb_base & sb_length\n");
+		return -EINVAL;
+	}
+	if ((((uintptr_t)req->req_ptr + req->req_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length)) ||
+		(((uintptr_t)req->resp_ptr + req->resp_len) >
+		((uintptr_t)data->client.user_virt_sb_base +
+						data->client.sb_length))) {
+		pr_err("cmd buf or resp buf is out of shared buffer region\n");
+		return -EINVAL;
+	}
+	return 0;
+}
+
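+/*
+ * Build the descriptor table for a pre-allocated (secure heap) fd in
+ * DMA-coherent memory. Resulting layout, one entry per sg element:
+ *
+ *   [uint32_t nents][phys_addr, len][phys_addr, len]...
+ *
+ * The table's address and size are saved in sec_buf_fd[fd_idx] so the
+ * cleanup pass can patch them out and free the allocation.
+ */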
+static int __qseecom_qteec_handle_pre_alc_fd(struct qseecom_dev_handle *data,
+				uint32_t fd_idx, struct sg_table *sg_ptr)
+{
+	struct scatterlist *sg = sg_ptr->sgl;
+	struct qseecom_sg_entry *sg_entry;
+	void *buf;
+	uint i;
+	size_t size;
+	dma_addr_t coh_pmem;
+
+	if (fd_idx >= MAX_ION_FD) {
+		pr_err("fd_idx [%d] is invalid\n", fd_idx);
+		return -ENOMEM;
+	}
+	/*
+	 * Allocate a buffer, populate it with number of entry plus
+	 * each sg entry's phy addr and length; then return the
+	 * phy_addr of the buffer.
+	 */
+	size = sizeof(uint32_t) +
+		sizeof(struct qseecom_sg_entry) * sg_ptr->nents;
+	size = (size + PAGE_SIZE) & PAGE_MASK;
+	buf = dma_alloc_coherent(qseecom.pdev,
+			size, &coh_pmem, GFP_KERNEL);
+	if (buf == NULL) {
+		pr_err("failed to alloc memory for sg buf\n");
+		return -ENOMEM;
+	}
+	*(uint32_t *)buf = sg_ptr->nents;
+	sg_entry = (struct qseecom_sg_entry *) (buf + sizeof(uint32_t));
+	for (i = 0; i < sg_ptr->nents; i++) {
+		sg_entry->phys_addr = (uint32_t)sg_dma_address(sg);
+		sg_entry->len = sg->length;
+		sg_entry++;
+		sg = sg_next(sg);
+	}
+	data->client.sec_buf_fd[fd_idx].is_sec_buf_fd = true;
+	data->client.sec_buf_fd[fd_idx].vbase = buf;
+	data->client.sec_buf_fd[fd_idx].pbase = coh_pmem;
+	data->client.sec_buf_fd[fd_idx].size = size;
+	return 0;
+}
+
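+/*
+ * Patch (or, with cleanup == true, un-patch) each ion fd referenced by
+ * the request: the word at cmd_buf_offset is overwritten with the
+ * buffer's DMA address (or a memref for pre-allocated fds), and the
+ * matching cache maintenance is done so TZ and HLOS agree on the
+ * buffer contents.
+ */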
+static int __qseecom_update_qteec_req_buf(struct qseecom_qteec_modfd_req *req,
+			struct qseecom_dev_handle *data, bool cleanup)
+{
+	struct ion_handle *ihandle;
+	int ret = 0;
+	int i = 0;
+	uint32_t *update;
+	struct sg_table *sg_ptr = NULL;
+	struct scatterlist *sg;
+	struct qseecom_param_memref *memref;
+
+	if (req == NULL) {
+		pr_err("Invalid address\n");
+		return -EINVAL;
+	}
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req->ifd_data[i].fd > 0) {
+			ihandle = ion_import_dma_buf_fd(qseecom.ion_clnt,
+					req->ifd_data[i].fd);
+			if (IS_ERR_OR_NULL(ihandle)) {
+				pr_err("Ion client can't retrieve the handle\n");
+				return -ENOMEM;
+			}
+			if ((req->req_len < sizeof(uint32_t)) ||
+				(req->ifd_data[i].cmd_buf_offset >
+				req->req_len - sizeof(uint32_t))) {
+				pr_err("Invalid offset/req len 0x%x/0x%x\n",
+					req->req_len,
+					req->ifd_data[i].cmd_buf_offset);
+				return -EINVAL;
+			}
+			update = (uint32_t *)((char *) req->req_ptr +
+				req->ifd_data[i].cmd_buf_offset);
+			if (!update) {
+				pr_err("update pointer is NULL\n");
+				return -EINVAL;
+			}
+		} else {
+			continue;
+		}
+		/* Populate the cmd data structure with the phys_addr */
+		sg_ptr = ion_sg_table(qseecom.ion_clnt, ihandle);
+		if (IS_ERR_OR_NULL(sg_ptr)) {
+			pr_err("IOn client could not retrieve sg table\n");
+			goto err;
+		}
+		sg = sg_ptr->sgl;
+		if (sg == NULL) {
+			pr_err("sg is NULL\n");
+			goto err;
+		}
+		if ((sg_ptr->nents == 0) || (sg->length == 0)) {
+			pr_err("Num of scat entr (%d)or length(%d) invalid\n",
+					sg_ptr->nents, sg->length);
+			goto err;
+		}
+		/* clean up buf for pre-allocated fd */
+		if (cleanup && data->client.sec_buf_fd[i].is_sec_buf_fd &&
+			(*update)) {
+			if (data->client.sec_buf_fd[i].vbase)
+				dma_free_coherent(qseecom.pdev,
+					data->client.sec_buf_fd[i].size,
+					data->client.sec_buf_fd[i].vbase,
+					data->client.sec_buf_fd[i].pbase);
+			memset((void *)update, 0,
+				sizeof(struct qseecom_param_memref));
+			memset(&(data->client.sec_buf_fd[i]), 0,
+				sizeof(struct qseecom_sec_buf_fd_info));
+			goto clean;
+		}
+
+		if (*update == 0) {
+			/* update buf for pre-allocated fd from secure heap */
+			ret = __qseecom_qteec_handle_pre_alc_fd(data, i,
+				sg_ptr);
+			if (ret) {
+				pr_err("Failed to handle buf for fd[%d]\n", i);
+				goto err;
+			}
+			memref = (struct qseecom_param_memref *)update;
+			memref->buffer =
+				(uint32_t)(data->client.sec_buf_fd[i].pbase);
+			memref->size =
+				(uint32_t)(data->client.sec_buf_fd[i].size);
+		} else {
+			/* update buf for fd from non-secure qseecom heap */
+			if (sg_ptr->nents != 1) {
+				pr_err("Num of scat entr (%d) invalid\n",
+					sg_ptr->nents);
+				goto err;
+			}
+			if (cleanup)
+				*update = 0;
+			else
+				*update = (uint32_t)sg_dma_address(sg_ptr->sgl);
+		}
+clean:
+		if (cleanup) {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+				ihandle, NULL, sg->length,
+				ION_IOC_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+		} else {
+			ret = msm_ion_do_cache_op(qseecom.ion_clnt,
+				ihandle, NULL, sg->length,
+				ION_IOC_CLEAN_INV_CACHES);
+			if (ret) {
+				pr_err("cache operation failed %d\n", ret);
+				goto err;
+			}
+			data->sglistinfo_ptr[i].indexAndFlags =
+				SGLISTINFO_SET_INDEX_FLAG(
+				(sg_ptr->nents == 1), 0,
+				req->ifd_data[i].cmd_buf_offset);
+			data->sglistinfo_ptr[i].sizeOrCount =
+				(sg_ptr->nents == 1) ?
+				sg->length : sg_ptr->nents;
+			data->sglist_cnt = i + 1;
+		}
+		/* Deallocate the handle */
+		if (!IS_ERR_OR_NULL(ihandle))
+			ion_free(qseecom.ion_clnt, ihandle);
+	}
+	return ret;
+err:
+	if (!IS_ERR_OR_NULL(ihandle))
+		ion_free(qseecom.ion_clnt, ihandle);
+	return -ENOMEM;
+}
+
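+/*
+ * Common QTEEC command path (open session, close session, request
+ * cancellation): validate the request, locate the registered app,
+ * marshal a 32- or 64-bit ireq depending on the QSEE version, clean
+ * the shared buffer caches, issue the SCM call, then handle the
+ * INCOMPLETE/reentrancy results and invalidate the shared buffer for
+ * the reply.
+ */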
+static int __qseecom_qteec_issue_cmd(struct qseecom_dev_handle *data,
+				struct qseecom_qteec_req *req, uint32_t cmd_id)
+{
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+
+	ret  = __qseecom_qteec_validate_msg(data, req);
+	if (ret)
+		return ret;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, false);
+		if (ret)
+			return ret;
+	}
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->req_ptr);
+		ireq.req_len = req->req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->resp_ptr);
+		ireq.resp_len = req->resp_len;
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->req_ptr);
+		ireq_64bit.req_len = req->req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req->resp_ptr);
+		ireq_64bit.resp_len = req->resp_len;
+		if ((data->client.app_arch == ELFCLASS32) &&
+			((ireq_64bit.req_ptr >=
+				PHY_ADDR_4G - ireq_64bit.req_len) ||
+			(ireq_64bit.resp_ptr >=
+				PHY_ADDR_4G - ireq_64bit.resp_len))){
+			pr_err("32bit app %s (id: %d): phy_addr exceeds 4G\n",
+				data->client.app_name, data->client.app_id);
+			pr_err("req_ptr:%llx,req_len:%x,rsp_ptr:%llx,rsp_len:%x\n",
+				ireq_64bit.req_ptr, ireq_64bit.req_len,
+				ireq_64bit.resp_ptr, ireq_64bit.resp_len);
+			return -EFAULT;
+		}
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+	}
+	if (qseecom.whitelist_support &&
+		cmd_id == QSEOS_TEE_OPEN_SESSION)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_OPEN_SESSION_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = cmd_id;
+
+	reqd_len_sb_in = req->req_len + req->resp_len;
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	if ((cmd_id == QSEOS_TEE_OPEN_SESSION) ||
+			(cmd_id == QSEOS_TEE_REQUEST_CANCELLATION)) {
+		ret = __qseecom_update_qteec_req_buf(
+			(struct qseecom_qteec_modfd_req *)req, data, true);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int qseecom_qteec_open_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+							QSEOS_TEE_OPEN_SESSION);
+
+	return ret;
+}
+
+static int qseecom_qteec_close_session(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp, sizeof(struct qseecom_qteec_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, &req, QSEOS_TEE_CLOSE_SESSION);
+	return ret;
+}
+
+static int qseecom_qteec_invoke_modfd_cmd(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_qteec_ireq ireq;
+	struct qseecom_qteec_64bit_ireq ireq_64bit;
+	struct qseecom_registered_app_list *ptr_app;
+	bool found_app = false;
+	unsigned long flags;
+	int ret = 0;
+	int i = 0;
+	uint32_t reqd_len_sb_in = 0;
+	void *cmd_buf = NULL;
+	size_t cmd_len;
+	struct sglist_info *table = data->sglistinfo_ptr;
+	void *req_ptr = NULL;
+	void *resp_ptr = NULL;
+
+	ret = copy_from_user(&req, argp,
+			sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_validate_msg(data,
+					(struct qseecom_qteec_req *)(&req));
+	if (ret)
+		return ret;
+	req_ptr = req.req_ptr;
+	resp_ptr = req.resp_ptr;
+
+	/* find app_id & img_name from list */
+	spin_lock_irqsave(&qseecom.registered_app_list_lock, flags);
+	list_for_each_entry(ptr_app, &qseecom.registered_app_list_head,
+							list) {
+		if ((ptr_app->app_id == data->client.app_id) &&
+			 (!strcmp(ptr_app->app_name, data->client.app_name))) {
+			found_app = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&qseecom.registered_app_list_lock, flags);
+	if (!found_app) {
+		pr_err("app_id %d (%s) is not found\n", data->client.app_id,
+			(char *)data->client.app_name);
+		return -ENOENT;
+	}
+
+	/* validate offsets */
+	for (i = 0; i < MAX_ION_FD; i++) {
+		if (req.ifd_data[i].fd) {
+			if (req.ifd_data[i].cmd_buf_offset >= req.req_len)
+				return -EINVAL;
+		}
+	}
+	req.req_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.req_ptr);
+	req.resp_ptr = (void *)__qseecom_uvirt_to_kvirt(data,
+						(uintptr_t)req.resp_ptr);
+	ret = __qseecom_update_qteec_req_buf(&req, data, false);
+	if (ret)
+		return ret;
+
+	if (qseecom.qsee_version < QSEE_VERSION_40) {
+		ireq.app_id = data->client.app_id;
+		ireq.req_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq.req_len = req.req_len;
+		ireq.resp_ptr = (uint32_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq;
+		cmd_len = sizeof(struct qseecom_qteec_ireq);
+		ireq.sglistinfo_ptr = (uint32_t)virt_to_phys(table);
+		ireq.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	} else {
+		ireq_64bit.app_id = data->client.app_id;
+		ireq_64bit.req_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)req_ptr);
+		ireq_64bit.req_len = req.req_len;
+		ireq_64bit.resp_ptr = (uint64_t)__qseecom_uvirt_to_kphys(data,
+						(uintptr_t)resp_ptr);
+		ireq_64bit.resp_len = req.resp_len;
+		cmd_buf = (void *)&ireq_64bit;
+		cmd_len = sizeof(struct qseecom_qteec_64bit_ireq);
+		ireq_64bit.sglistinfo_ptr = (uint64_t)virt_to_phys(table);
+		ireq_64bit.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+		dmac_flush_range((void *)table,
+				(void *)table + SGLISTINFO_TABLE_SIZE);
+	}
+	reqd_len_sb_in = req.req_len + req.resp_len;
+	if (qseecom.whitelist_support)
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND_WHITELIST;
+	else
+		*(uint32_t *)cmd_buf = QSEOS_TEE_INVOKE_COMMAND;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+					data->client.sb_virt,
+					reqd_len_sb_in,
+					ION_IOC_CLEAN_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+
+	__qseecom_reentrancy_check_if_this_app_blocked(ptr_app);
+
+	ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+				cmd_buf, cmd_len,
+				&resp, sizeof(resp));
+	if (ret) {
+		pr_err("scm_call() failed with err: %d (app_id = %d)\n",
+					ret, data->client.app_id);
+		return ret;
+	}
+
+	if (qseecom.qsee_reentrancy_support) {
+		ret = __qseecom_process_reentrancy(&resp, ptr_app, data);
+	} else {
+		if (resp.result == QSEOS_RESULT_INCOMPLETE) {
+			ret = __qseecom_process_incomplete_cmd(data, &resp);
+			if (ret) {
+				pr_err("process_incomplete_cmd failed err: %d\n",
+						ret);
+				return ret;
+			}
+		} else {
+			if (resp.result != QSEOS_RESULT_SUCCESS) {
+				pr_err("Response result %d not supported\n",
+								resp.result);
+				ret = -EINVAL;
+			}
+		}
+	}
+	ret = __qseecom_update_qteec_req_buf(&req, data, true);
+	if (ret)
+		return ret;
+
+	ret = msm_ion_do_cache_op(qseecom.ion_clnt, data->client.ihandle,
+				data->client.sb_virt, data->client.sb_length,
+				ION_IOC_INV_CACHES);
+	if (ret) {
+		pr_err("cache operation failed %d\n", ret);
+		return ret;
+	}
+	return 0;
+}
+
+static int qseecom_qteec_request_cancellation(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_qteec_modfd_req req;
+	int ret = 0;
+
+	ret = copy_from_user(&req, argp,
+				sizeof(struct qseecom_qteec_modfd_req));
+	if (ret) {
+		pr_err("copy_from_user failed\n");
+		return ret;
+	}
+	ret = __qseecom_qteec_issue_cmd(data, (struct qseecom_qteec_req *)&req,
+						QSEOS_TEE_REQUEST_CANCELLATION);
+
+	return ret;
+}
+
+static void __qseecom_clean_data_sglistinfo(struct qseecom_dev_handle *data)
+{
+	if (data->sglist_cnt) {
+		memset(data->sglistinfo_ptr, 0,
+			SGLISTINFO_TABLE_SIZE);
+		data->sglist_cnt = 0;
+	}
+}
+
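+/*
+ * Top-level ioctl dispatcher. Each case follows the same pattern:
+ * verify the handle type matches the request, bump ioctl_count so an
+ * abort can wait for in-flight calls on abort_wq, do the work (most
+ * handlers additionally serialize on app_access_lock, which allows
+ * only one client into TZ at a time), then drop the count and wake
+ * any waiters.
+ */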
+static long qseecom_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data = file->private_data;
+	void __user *argp = (void __user *) arg;
+	bool perf_enabled = false;
+
+	if (!data) {
+		pr_err("Invalid/uninitialized device handle\n");
+		return -EINVAL;
+	}
+
+	if (data->abort) {
+		pr_err("Aborting qseecom driver\n");
+		return -ENODEV;
+	}
+
+	switch (cmd) {
+	case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("reg lstnr req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl register_listener_req()\n");
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		data->type = QSEECOM_LISTENER_SERVICE;
+		ret = qseecom_register_listener(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_register_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNREGISTER_LISTENER_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("unreg lstnr req: invalid handle (%d) lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("ioctl unregister_listener_req()\n");
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unregister_listener(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_unregister_listener: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send cmd req: invalid handle (%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			/* register bus bw in case the client doesn't do it */
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				ret = -EINVAL;
+				mutex_unlock(&app_access_lock);
+				break;
+			}
+		}
+		/*
+		 * On targets where crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * then the crypto clock was not enabled before sending cmd to
+		 * tz, qseecom will enable the clock to avoid service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_cmd(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_REQ:
+	case QSEECOM_IOCTL_SEND_MODFD_CMD_64_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("send mdfd cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		if (qseecom.support_bus_scaling) {
+			if (!data->mode) {
+				mutex_lock(&qsee_bw_mutex);
+				__qseecom_register_bus_bandwidth_needs(
+								data, HIGH);
+				mutex_unlock(&qsee_bw_mutex);
+			}
+			ret = qseecom_scale_bus_bandwidth_timer(INACTIVE);
+			if (ret) {
+				pr_err("Failed to set bw.\n");
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+		}
+		/*
+		 * On targets where crypto clock is handled by HLOS,
+		 * if clk_access_cnt is zero and perf_enabled is false,
+		 * then the crypto clock was not enabled before sending cmd to
+		 * tz, qseecom will enable the clock to avoid service failure.
+		 */
+		if (!qseecom.no_clock_support &&
+			!qseecom.qsee.clk_access_cnt && !data->perf_enabled) {
+			pr_debug("ce clock is not enabled!\n");
+			ret = qseecom_perf_enable(data);
+			if (ret) {
+				pr_err("Failed to vote for clock with err %d\n",
+						ret);
+				mutex_unlock(&app_access_lock);
+				ret = -EINVAL;
+				break;
+			}
+			perf_enabled = true;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_CMD_REQ)
+			ret = qseecom_send_modfd_cmd(data, argp);
+		else
+			ret = qseecom_send_modfd_cmd_64(data, argp);
+		if (qseecom.support_bus_scaling)
+			__qseecom_add_bw_scale_down_timer(
+				QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		if (perf_enabled) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		}
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed qseecom_send_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_IOCTL_RECEIVE_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_receive_req(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret && (ret != -ERESTARTSYS))
+			pr_err("failed qseecom_receive_req: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_RESP_REQ: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("send resp req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.qsee_reentrancy_support)
+			ret = qseecom_send_resp();
+		else
+			ret = qseecom_reentrancy_send_resp(data);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_send_resp: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_SET_MEM_PARAM_REQ: {
+		if ((data->type != QSEECOM_CLIENT_APP) &&
+			(data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_SECURE_SERVICE)) {
+			pr_err("set mem param req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_set_client_mem_param(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Qqseecom_set_mem_param request: %d\n",
+								ret);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_APP_REQ: {
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("load app req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_CLIENT_APP;
+		pr_debug("LOAD_APP_REQ: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_app(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_APP_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("unload app req:invalid handle(%d) app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		pr_debug("UNLOAD_APP: qseecom_addr = 0x%pK\n", data);
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_app(data, false);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_QSEOS_VERSION_REQ: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_qseos_version(data, argp);
+		if (ret)
+			pr_err("qseecom_get_qseos_version: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_ENABLE_REQ:{
+		if ((data->type != QSEECOM_GENERIC) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf enable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf enable req:invalid handle(%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (qseecom.support_bus_scaling) {
+			mutex_lock(&qsee_bw_mutex);
+			__qseecom_register_bus_bandwidth_needs(data, HIGH);
+			mutex_unlock(&qsee_bw_mutex);
+		} else {
+			ret = qseecom_perf_enable(data);
+			if (ret)
+				pr_err("Fail to vote for clocks %d\n", ret);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_PERF_DISABLE_REQ:{
+		if ((data->type != QSEECOM_SECURE_SERVICE) &&
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("perf disable req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if ((data->type == QSEECOM_CLIENT_APP) &&
+			(data->client.app_id == 0)) {
+			pr_err("perf disable: invalid handle (%d)app_id(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (!qseecom.support_bus_scaling) {
+			qsee_disable_clock_vote(data, CLK_DFAB);
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		} else {
+			mutex_lock(&qsee_bw_mutex);
+			qseecom_unregister_bus_bandwidth_needs(data);
+			mutex_unlock(&qsee_bw_mutex);
+		}
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+
+	case QSEECOM_IOCTL_SET_BUS_SCALING_REQ: {
+		/* If crypto clock is not handled by HLOS, return directly. */
+		if (qseecom.no_clock_support) {
+			pr_debug("crypto clock is not handled by HLOS\n");
+			break;
+		}
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("set bus scale: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_scale_bus_bandwidth(data, argp);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_LOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("load ext elf req: invalid client handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_UNAVAILABLE_CLIENT_APP;
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_load_external_elf(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed load_external_elf request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_UNLOAD_EXTERNAL_ELF_REQ: {
+		if (data->type != QSEECOM_UNAVAILABLE_CLIENT_APP) {
+			pr_err("unload ext elf req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_unload_external_elf(data);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed unload_app request: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_APP_LOADED_QUERY_REQ: {
+		data->type = QSEECOM_CLIENT_APP;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		pr_debug("APP_LOAD_QUERY: qseecom_addr = 0x%pK\n", data);
+		ret = qseecom_query_app_loaded(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_CMD_SERVICE_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("send cmd svc req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->type = QSEECOM_SECURE_SERVICE;
+		if (qseecom.qsee_version < QSEE_VERSION_03) {
+			pr_err("SEND_CMD_SERVICE_REQ: Invalid qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_send_service_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_CREATE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("create key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Create Key feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_create_key(data, argp);
+		if (ret)
+			pr_err("failed to create encryption key: %d\n", ret);
+
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_WIPE_KEY_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("wipe key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Wipe Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_wipe_key(data, argp);
+		if (ret)
+			pr_err("failed to wipe encryption key: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_UPDATE_KEY_USER_INFO_REQ: {
+		if (!(qseecom.support_pfe || qseecom.support_fde))
+			pr_err("Features requiring key init not supported\n");
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("update key req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_05) {
+			pr_err("Update Key feature unsupported in qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_update_key_user_info(data, argp);
+		if (ret)
+			pr_err("failed to update key user info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SAVE_PARTITION_HASH_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("save part hash req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_save_partition_hash(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_IS_ES_ACTIVATED_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("ES activated req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_is_es_activated(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_MDTP_CIPHER_DIP_REQ: {
+		if (data->type != QSEECOM_GENERIC) {
+			pr_err("MDTP cipher DIP req: invalid handle (%d)\n",
+								data->type);
+			ret = -EINVAL;
+			break;
+		}
+		data->released = true;
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_mdtp_cipher_dip(argp);
+		atomic_dec(&data->ioctl_count);
+		mutex_unlock(&app_access_lock);
+		break;
+	}
+	case QSEECOM_IOCTL_SEND_MODFD_RESP:
+	case QSEECOM_IOCTL_SEND_MODFD_RESP_64: {
+		if ((data->listener.id == 0) ||
+			(data->type != QSEECOM_LISTENER_SERVICE)) {
+			pr_err("receive req: invalid handle (%d), lid(%d)\n",
+						data->type, data->listener.id);
+			ret = -EINVAL;
+			break;
+		}
+		atomic_inc(&data->ioctl_count);
+		if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
+			ret = qseecom_send_modfd_resp(data, argp);
+		else
+			ret = qseecom_send_modfd_resp_64(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		if (ret)
+			pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Open session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_open_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed open_session_cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_CLOSE_SESSION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Close session: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_close_session(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed close_session_cmd: %d\n", ret);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_INVOKE_MODFD_CMD_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Invoke cmd: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_invoke_modfd_cmd(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed Invoke cmd: %d\n", ret);
+		__qseecom_clean_data_sglistinfo(data);
+		break;
+	}
+	case QSEECOM_QTEEC_IOCTL_REQUEST_CANCELLATION_REQ: {
+		if ((data->client.app_id == 0) ||
+			(data->type != QSEECOM_CLIENT_APP)) {
+			pr_err("Cancel req: invalid handle (%d) appid(%d)\n",
+					data->type, data->client.app_id);
+			ret = -EINVAL;
+			break;
+		}
+		if (qseecom.qsee_version < QSEE_VERSION_40) {
+			pr_err("GP feature unsupported: qsee ver %u\n",
+				qseecom.qsee_version);
+			return -EINVAL;
+		}
+		/* Only one client allowed here at a time */
+		mutex_lock(&app_access_lock);
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_qteec_request_cancellation(data, argp);
+		atomic_dec(&data->ioctl_count);
+		wake_up_all(&data->abort_wq);
+		mutex_unlock(&app_access_lock);
+		if (ret)
+			pr_err("failed request_cancellation: %d\n", ret);
+		break;
+	}
+	case QSEECOM_IOCTL_GET_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_get_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_FREE_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_free_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	case QSEECOM_IOCTL_QUERY_CE_PIPE_INFO: {
+		atomic_inc(&data->ioctl_count);
+		ret = qseecom_query_ce_info(data, argp);
+		if (ret)
+			pr_err("failed get fde ce pipe info: %d\n", ret);
+		atomic_dec(&data->ioctl_count);
+		break;
+	}
+	default:
+		pr_err("Invalid IOCTL: 0x%x\n", cmd);
+		return -EINVAL;
+	}
+	return ret;
+}
+
+static int qseecom_open(struct inode *inode, struct file *file)
+{
+	int ret = 0;
+	struct qseecom_dev_handle *data;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+	file->private_data = data;
+	data->abort = 0;
+	data->type = QSEECOM_GENERIC;
+	data->released = false;
+	memset((void *)data->client.app_name, 0, MAX_APP_NAME_SIZE);
+	data->mode = INACTIVE;
+	init_waitqueue_head(&data->abort_wq);
+	atomic_set(&data->ioctl_count, 0);
+	return ret;
+}
+
+static int qseecom_release(struct inode *inode, struct file *file)
+{
+	struct qseecom_dev_handle *data = file->private_data;
+	int ret = 0;
+
+	if (data->released == false) {
+		pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
+			data->type, data->mode, data);
+		switch (data->type) {
+		case QSEECOM_LISTENER_SERVICE:
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unregister_listener(data);
+			mutex_unlock(&app_access_lock);
+			break;
+		case QSEECOM_CLIENT_APP:
+			mutex_lock(&app_access_lock);
+			ret = qseecom_unload_app(data, true);
+			mutex_unlock(&app_access_lock);
+			break;
+		case QSEECOM_SECURE_SERVICE:
+		case QSEECOM_GENERIC:
+			ret = qseecom_unmap_ion_allocated_memory(data);
+			if (ret)
+				pr_err("Ion Unmap failed\n");
+			break;
+		case QSEECOM_UNAVAILABLE_CLIENT_APP:
+			break;
+		default:
+			pr_err("Unsupported clnt_handle_type %d",
+				data->type);
+			break;
+		}
+	}
+
+	if (qseecom.support_bus_scaling) {
+		mutex_lock(&qsee_bw_mutex);
+		if (data->mode != INACTIVE) {
+			qseecom_unregister_bus_bandwidth_needs(data);
+			if (qseecom.cumulative_mode == INACTIVE) {
+				ret = __qseecom_set_msm_bus_request(INACTIVE);
+				if (ret)
+					pr_err("Fail to scale down bus\n");
+			}
+		}
+		mutex_unlock(&qsee_bw_mutex);
+	} else {
+		if (data->fast_load_enabled)
+			qsee_disable_clock_vote(data, CLK_SFPB);
+		if (data->perf_enabled)
+			qsee_disable_clock_vote(data, CLK_DFAB);
+	}
+	kfree(data);
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+#include "compat_qseecom.c"
+#else
+#define compat_qseecom_ioctl	NULL
+#endif
+
+static const struct file_operations qseecom_fops = {
+		.owner = THIS_MODULE,
+		.unlocked_ioctl = qseecom_ioctl,
+		.compat_ioctl = compat_qseecom_ioctl,
+		.open = qseecom_open,
+		.release = qseecom_release
+};
+
+static int __qseecom_init_clk(enum qseecom_ce_hw_instance ce)
+{
+	int rc = 0;
+	struct device *pdev;
+	struct qseecom_clk *qclk;
+	char *core_clk_src = NULL;
+	char *core_clk = NULL;
+	char *iface_clk = NULL;
+	char *bus_clk = NULL;
+
+	switch (ce) {
+	case CLK_QSEE: {
+		core_clk_src = "core_clk_src";
+		core_clk = "core_clk";
+		iface_clk = "iface_clk";
+		bus_clk = "bus_clk";
+		qclk = &qseecom.qsee;
+		qclk->instance = CLK_QSEE;
+		break;
+	}
+	case CLK_CE_DRV: {
+		core_clk_src = "ce_drv_core_clk_src";
+		core_clk = "ce_drv_core_clk";
+		iface_clk = "ce_drv_iface_clk";
+		bus_clk = "ce_drv_bus_clk";
+		qclk = &qseecom.ce_drv;
+		qclk->instance = CLK_CE_DRV;
+		break;
+	}
+	default:
+		pr_err("Invalid ce hw instance: %d!\n", ce);
+		return -EIO;
+	}
+
+	if (qseecom.no_clock_support) {
+		qclk->ce_core_clk = NULL;
+		qclk->ce_clk = NULL;
+		qclk->ce_bus_clk = NULL;
+		qclk->ce_core_src_clk = NULL;
+		return 0;
+	}
+
+	pdev = qseecom.pdev;
+
+	/* Get CE3 src core clk. */
+	qclk->ce_core_src_clk = clk_get(pdev, core_clk_src);
+	if (!IS_ERR(qclk->ce_core_src_clk)) {
+		rc = clk_set_rate(qclk->ce_core_src_clk,
+					qseecom.ce_opp_freq_hz);
+		if (rc) {
+			clk_put(qclk->ce_core_src_clk);
+			qclk->ce_core_src_clk = NULL;
+			pr_err("Unable to set the core src clk @%uMhz.\n",
+				qseecom.ce_opp_freq_hz/CE_CLK_DIV);
+			return -EIO;
+		}
+	} else {
+		pr_warn("Unable to get CE core src clk, set to NULL\n");
+		qclk->ce_core_src_clk = NULL;
+	}
+
+	/* Get CE core clk */
+	qclk->ce_core_clk = clk_get(pdev, core_clk);
+	if (IS_ERR(qclk->ce_core_clk)) {
+		rc = PTR_ERR(qclk->ce_core_clk);
+		pr_err("Unable to get CE core clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		return -EIO;
+	}
+
+	/* Get CE Interface clk */
+	qclk->ce_clk = clk_get(pdev, iface_clk);
+	if (IS_ERR(qclk->ce_clk)) {
+		rc = PTR_ERR(qclk->ce_clk);
+		pr_err("Unable to get CE interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		return -EIO;
+	}
+
+	/* Get CE AXI clk */
+	qclk->ce_bus_clk = clk_get(pdev, bus_clk);
+	if (IS_ERR(qclk->ce_bus_clk)) {
+		rc = PTR_ERR(qclk->ce_bus_clk);
+		pr_err("Unable to get CE BUS interface clk\n");
+		if (qclk->ce_core_src_clk != NULL)
+			clk_put(qclk->ce_core_src_clk);
+		clk_put(qclk->ce_core_clk);
+		clk_put(qclk->ce_clk);
+		return -EIO;
+	}
+
+	return rc;
+}
+
+static void __qseecom_deinit_clk(enum qseecom_ce_hw_instance ce)
+{
+	struct qseecom_clk *qclk;
+
+	if (ce == CLK_QSEE)
+		qclk = &qseecom.qsee;
+	else
+		qclk = &qseecom.ce_drv;
+
+	if (qclk->ce_clk != NULL) {
+		clk_put(qclk->ce_clk);
+		qclk->ce_clk = NULL;
+	}
+	if (qclk->ce_core_clk != NULL) {
+		clk_put(qclk->ce_core_clk);
+		qclk->ce_core_clk = NULL;
+	}
+	if (qclk->ce_bus_clk != NULL) {
+		clk_put(qclk->ce_bus_clk);
+		qclk->ce_bus_clk = NULL;
+	}
+	if (qclk->ce_core_src_clk != NULL) {
+		clk_put(qclk->ce_core_src_clk);
+		qclk->ce_core_src_clk = NULL;
+	}
+	qclk->instance = CLK_INVALID;
+}
+
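+/*
+ * Parse the crypto engine (CE) pipe configuration from the device tree.
+ * Two schemas are supported: the table-based properties
+ * "qcom,full-disk-encrypt-info" / "qcom,per-file-encrypt-info", and the
+ * legacy single-pipe-pair properties "qcom,disk-encrypt-pipe-pair" /
+ * "qcom,file-encrypt-pipe-pair" used when no table is found (old_db).
+ *
+ * Illustrative DT fragment for the table-based schema (hypothetical
+ * values; assumes each entry is a <unit ce pipe-pair> tuple matching
+ * the cells of struct qseecom_crypto_info):
+ *
+ *	qcom,support-fde;
+ *	qcom,full-disk-encrypt-info = <0 0 1>, <1 0 2>;
+ */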
+static int qseecom_retrieve_ce_data(struct platform_device *pdev)
+{
+	int rc = 0;
+	uint32_t hlos_num_ce_hw_instances;
+	uint32_t disk_encrypt_pipe;
+	uint32_t file_encrypt_pipe;
+	/* zero-initialized: only populated by the legacy DT schema below */
+	uint32_t hlos_ce_hw_instance[MAX_CE_PIPE_PAIR_PER_UNIT] = {0};
+	int i;
+	const int *tbl;
+	int size;
+	int entry;
+	struct qseecom_crypto_info *pfde_tbl = NULL;
+	struct qseecom_crypto_info *p;
+	int tbl_size;
+	int j;
+	bool old_db = true;
+	struct qseecom_ce_info_use *pce_info_use;
+	uint32_t *unit_tbl = NULL;
+	int total_units = 0;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	qseecom.ce_info.fde = qseecom.ce_info.pfe = NULL;
+	qseecom.ce_info.num_fde = qseecom.ce_info.num_pfe = 0;
+
+	if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,qsee-ce-hw-instance",
+				&qseecom.ce_info.qsee_ce_hw_instance)) {
+		pr_err("Fail to get qsee ce hw instance information.\n");
+		rc = -EINVAL;
+		goto out;
+	} else {
+		pr_debug("qsee-ce-hw-instance=0x%x\n",
+			qseecom.ce_info.qsee_ce_hw_instance);
+	}
+
+	qseecom.support_fde = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-fde");
+	qseecom.support_pfe = of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-pfe");
+
+	if (!qseecom.support_pfe && !qseecom.support_fde) {
+		pr_warn("Device does not support PFE/FDE\n");
+		goto out;
+	}
+
+	if (qseecom.support_fde)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("invalid full-disk-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+
+		if (!pfde_tbl || !unit_tbl) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,full-disk-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read full-disk-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_fde = total_units;
+		pce_info_use = qseecom.ce_info.fde = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				pr_err("failed to alloc memory\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
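+	/* The PFE table below is parsed exactly like the FDE table above */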
+	if (qseecom.support_pfe)
+		tbl = of_get_property((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info", &size);
+	else
+		tbl = NULL;
+	if (tbl) {
+		old_db = false;
+		if (size % sizeof(struct qseecom_crypto_info)) {
+			pr_err("invalid per-file-encrypt-info tbl size(%d)\n",
+				size);
+			rc = -EINVAL;
+			goto out;
+		}
+		tbl_size = size / sizeof(struct qseecom_crypto_info);
+
+		pfde_tbl = kzalloc(size, GFP_KERNEL);
+		unit_tbl = kcalloc(tbl_size, sizeof(int), GFP_KERNEL);
+		total_units = 0;
+		if (!pfde_tbl || !unit_tbl) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,per-file-encrypt-info",
+			(u32 *)pfde_tbl, size/sizeof(u32))) {
+			pr_err("failed to read per-file-encrypt-info tbl\n");
+			rc = -EINVAL;
+			goto out;
+		}
+
+		for (i = 0, p = pfde_tbl;  i < tbl_size; i++, p++) {
+			for (j = 0; j < total_units; j++) {
+				if (p->unit_num == *(unit_tbl + j))
+					break;
+			}
+			if (j == total_units) {
+				*(unit_tbl + total_units) = p->unit_num;
+				total_units++;
+			}
+		}
+
+		qseecom.ce_info.num_pfe = total_units;
+		pce_info_use = qseecom.ce_info.pfe = kcalloc(
+			total_units, sizeof(struct qseecom_ce_info_use),
+				GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		for (j = 0; j < total_units; j++, pce_info_use++) {
+			pce_info_use->unit_num = *(unit_tbl + j);
+			pce_info_use->alloc = false;
+			pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+			pce_info_use->num_ce_pipe_entries = 0;
+			pce_info_use->ce_pipe_entry = NULL;
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num)
+					pce_info_use->num_ce_pipe_entries++;
+			}
+
+			entry = pce_info_use->num_ce_pipe_entries;
+			pce_entry = pce_info_use->ce_pipe_entry =
+				kcalloc(entry,
+					sizeof(struct qseecom_ce_pipe_entry),
+					GFP_KERNEL);
+			if (pce_entry == NULL) {
+				pr_err("failed to alloc memory\n");
+				rc = -ENOMEM;
+				goto out;
+			}
+
+			for (i = 0, p = pfde_tbl; i < tbl_size; i++, p++) {
+				if (p->unit_num == pce_info_use->unit_num) {
+					pce_entry->ce_num = p->ce;
+					pce_entry->ce_pipe_pair =
+							p->pipe_pair;
+					pce_entry->valid = true;
+					pce_entry++;
+				}
+			}
+		}
+		kfree(unit_tbl);
+		unit_tbl = NULL;
+		kfree(pfde_tbl);
+		pfde_tbl = NULL;
+	}
+
+	if (!old_db)
+		goto out1;
+
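+	/*
+	 * Legacy (old_db) schema: one default unit per usage type, with a
+	 * single pipe pair shared across all HLOS CE HW instances.
+	 */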
+	if (of_property_read_bool((&pdev->dev)->of_node,
+			"qcom,support-multiple-ce-hw-instance")) {
+		if (of_property_read_u32((&pdev->dev)->of_node,
+			"qcom,hlos-num-ce-hw-instances",
+				&hlos_num_ce_hw_instances)) {
+			pr_err("Fail: get hlos number of ce hw instance\n");
+			rc = -EINVAL;
+			goto out;
+		}
+	} else {
+		hlos_num_ce_hw_instances = 1;
+	}
+
+	if (hlos_num_ce_hw_instances > MAX_CE_PIPE_PAIR_PER_UNIT) {
+		pr_err("Fail: hlos number of ce hw instance exceeds %d\n",
+			MAX_CE_PIPE_PAIR_PER_UNIT);
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (of_property_read_u32_array((&pdev->dev)->of_node,
+			"qcom,hlos-ce-hw-instance", hlos_ce_hw_instance,
+			hlos_num_ce_hw_instances)) {
+		pr_err("Fail: get hlos ce hw instance info\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	if (qseecom.support_fde) {
+		pce_info_use = qseecom.ce_info.fde =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_fde = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_FDE;
+		pce_info_use->ce_pipe_entry = NULL;
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,disk-encrypt-pipe-pair",
+				&disk_encrypt_pipe)) {
+			pr_err("Fail to get FDE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("disk-encrypt-pipe-pair=0x%x\n",
+				disk_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+				hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = disk_encrypt_pipe;
+			pce_entry->valid = true;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support FDE\n");
+		disk_encrypt_pipe = 0xff;
+	}
+	if (qseecom.support_pfe) {
+		pce_info_use = qseecom.ce_info.pfe =
+			kzalloc(sizeof(struct qseecom_ce_info_use), GFP_KERNEL);
+		if (!pce_info_use) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		/* by default for old db */
+		qseecom.ce_info.num_pfe = DEFAULT_NUM_CE_INFO_UNIT;
+		pce_info_use->unit_num = DEFAULT_CE_INFO_UNIT;
+		pce_info_use->alloc = false;
+		pce_info_use->type = CE_PIPE_PAIR_USE_TYPE_PFE;
+		pce_info_use->ce_pipe_entry = NULL;
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,file-encrypt-pipe-pair",
+				&file_encrypt_pipe)) {
+			pr_err("Fail to get PFE pipe information.\n");
+			rc = -EINVAL;
+			goto out;
+		} else {
+			pr_debug("file-encrypt-pipe-pair=0x%x\n",
+				file_encrypt_pipe);
+		}
+		entry = pce_info_use->num_ce_pipe_entries =
+						hlos_num_ce_hw_instances;
+		pce_entry = pce_info_use->ce_pipe_entry =
+			kcalloc(entry,
+				sizeof(struct qseecom_ce_pipe_entry),
+				GFP_KERNEL);
+		if (pce_entry == NULL) {
+			pr_err("failed to alloc memory\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+		for (i = 0; i < entry; i++) {
+			pce_entry->ce_num = hlos_ce_hw_instance[i];
+			pce_entry->ce_pipe_pair = file_encrypt_pipe;
+			pce_entry->valid = true;
+			pce_entry++;
+		}
+	} else {
+		pr_warn("Device does not support PFE\n");
+		file_encrypt_pipe = 0xff;
+	}
+
+out1:
+	qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
+	qseecom.ce_drv.instance = hlos_ce_hw_instance[0];
+out:
+	if (rc) {
+		if (qseecom.ce_info.fde) {
+			pce_info_use = qseecom.ce_info.fde;
+			for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.fde);
+		qseecom.ce_info.fde = NULL;
+		if (qseecom.ce_info.pfe) {
+			pce_info_use = qseecom.ce_info.pfe;
+			for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+				pce_entry = pce_info_use->ce_pipe_entry;
+				kfree(pce_entry);
+				pce_info_use++;
+			}
+		}
+		kfree(qseecom.ce_info.pfe);
+		qseecom.ce_info.pfe = NULL;
+	}
+	kfree(unit_tbl);
+	kfree(pfde_tbl);
+	return rc;
+}
+
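+/*
+ * Reserve a CE info unit for the caller identified by pinfo->handle and
+ * report its pipe entries. If the handle already owns a unit, that unit
+ * is returned; otherwise a free unit is claimed. Returns -EBUSY when all
+ * units are held by other handles.
+ */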
+static int qseecom_get_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	if (copy_from_user(pinfo, argp,
+			sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_from_user failed\n");
+		return -EFAULT;
+	}
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	for (i = 0; i < total; i++) {
+		if (!p->alloc)
+			pce_info_use = p;
+		else if (!memcmp(p->handle, pinfo->handle,
+						MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+
+	if (pce_info_use == NULL)
+		return -EBUSY;
+
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (!pce_info_use->alloc) {
+		pce_info_use->alloc = true;
+		memcpy(pce_info_use->handle,
+			pinfo->handle, MAX_CE_INFO_HANDLE_SIZE);
+	}
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
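+/*
+ * Release the CE info unit previously reserved for pinfo->handle by
+ * qseecom_get_ce_info(); a no-op if no matching allocation exists.
+ */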
+static int qseecom_free_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	struct qseecom_ce_info_use *p;
+	int total = 0;
+	int i;
+	bool found = false;
+
+	if (copy_from_user(pinfo, argp,
+			sizeof(struct qseecom_ce_info_req)))
+		return -EFAULT;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < total; i++) {
+		if (p->alloc &&
+			!memcmp(p->handle, pinfo->handle,
+					MAX_CE_INFO_HANDLE_SIZE)) {
+			memset(p->handle, 0, MAX_CE_INFO_HANDLE_SIZE);
+			p->alloc = false;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	return ret;
+}
+
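+/*
+ * Read-only variant of qseecom_get_ce_info(): look up the unit already
+ * reserved for pinfo->handle without claiming a new one.
+ */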
+static int qseecom_query_ce_info(struct qseecom_dev_handle *data,
+				void __user *argp)
+{
+	struct qseecom_ce_info_req req;
+	struct qseecom_ce_info_req *pinfo = &req;
+	int ret = 0;
+	int i;
+	unsigned int entries;
+	struct qseecom_ce_info_use *pce_info_use, *p;
+	int total = 0;
+	bool found = false;
+	struct qseecom_ce_pipe_entry *pce_entry;
+
+	if (copy_from_user(pinfo, argp,
+			sizeof(struct qseecom_ce_info_req)))
+		return -EFAULT;
+
+	switch (pinfo->usage) {
+	case QSEOS_KM_USAGE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_UFS_ICE_DISK_ENCRYPTION:
+	case QSEOS_KM_USAGE_SDCC_ICE_DISK_ENCRYPTION:
+		if (qseecom.support_fde) {
+			p = qseecom.ce_info.fde;
+			total = qseecom.ce_info.num_fde;
+		} else {
+			pr_err("system does not support fde\n");
+			return -EINVAL;
+		}
+		break;
+	case QSEOS_KM_USAGE_FILE_ENCRYPTION:
+		if (qseecom.support_pfe) {
+			p = qseecom.ce_info.pfe;
+			total = qseecom.ce_info.num_pfe;
+		} else {
+			pr_err("system does not support pfe\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		pr_err("unsupported usage %d\n", pinfo->usage);
+		return -EINVAL;
+	}
+
+	pce_info_use = NULL;
+	pinfo->unit_num = INVALID_CE_INFO_UNIT_NUM;
+	pinfo->num_ce_pipe_entries = 0;
+	for (i = 0; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+
+	for (i = 0; i < total; i++) {
+		if (p->alloc && !memcmp(p->handle,
+				pinfo->handle, MAX_CE_INFO_HANDLE_SIZE)) {
+			pce_info_use = p;
+			found = true;
+			break;
+		}
+		p++;
+	}
+	if (!pce_info_use)
+		goto out;
+	pinfo->unit_num = pce_info_use->unit_num;
+	if (pce_info_use->num_ce_pipe_entries >
+					MAX_CE_PIPE_PAIR_PER_UNIT)
+		entries = MAX_CE_PIPE_PAIR_PER_UNIT;
+	else
+		entries = pce_info_use->num_ce_pipe_entries;
+	pinfo->num_ce_pipe_entries = entries;
+	pce_entry = pce_info_use->ce_pipe_entry;
+	for (i = 0; i < entries; i++, pce_entry++)
+		pinfo->ce_pipe_entry[i] = *pce_entry;
+	for (; i < MAX_CE_PIPE_PAIR_PER_UNIT; i++)
+		pinfo->ce_pipe_entry[i].valid = 0;
+out:
+	if (copy_to_user(argp, pinfo, sizeof(struct qseecom_ce_info_req))) {
+		pr_err("copy_to_user failed\n");
+		ret = -EFAULT;
+	}
+	return ret;
+}
+
+/*
+ * Check the TZ whitelist feature version; versions below 1.0.0 do not
+ * support the whitelist feature.
+ */
+static int qseecom_check_whitelist_feature(void)
+{
+	int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
+
+	return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
+}
+
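+/*
+ * Probe sequence: create the char device and class, query the QSEE
+ * version over SCM, create the ION client, parse DT (CE pipes, bus
+ * scaling, clocks), notify TZ of the secure apps region, then register
+ * the bus-scaling client and mark the driver READY.
+ */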
+static int qseecom_probe(struct platform_device *pdev)
+{
+	int rc;
+	int i;
+	uint32_t feature = 10;
+	struct device *class_dev;
+	struct msm_bus_scale_pdata *qseecom_platform_support = NULL;
+	struct qseecom_command_scm_resp resp;
+	struct qseecom_ce_info_use *pce_info_use = NULL;
+
+	qseecom.qsee_bw_count = 0;
+	qseecom.qsee_perf_client = 0;
+	qseecom.qsee_sfpb_bw_count = 0;
+
+	qseecom.qsee.ce_core_clk = NULL;
+	qseecom.qsee.ce_clk = NULL;
+	qseecom.qsee.ce_core_src_clk = NULL;
+	qseecom.qsee.ce_bus_clk = NULL;
+
+	qseecom.cumulative_mode = 0;
+	qseecom.current_mode = INACTIVE;
+	qseecom.support_bus_scaling = false;
+	qseecom.support_fde = false;
+	qseecom.support_pfe = false;
+
+	qseecom.ce_drv.ce_core_clk = NULL;
+	qseecom.ce_drv.ce_clk = NULL;
+	qseecom.ce_drv.ce_core_src_clk = NULL;
+	qseecom.ce_drv.ce_bus_clk = NULL;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+
+	qseecom.app_block_ref_cnt = 0;
+	init_waitqueue_head(&qseecom.app_block_wq);
+	qseecom.whitelist_support = true;
+
+	rc = alloc_chrdev_region(&qseecom_device_no, 0, 1, QSEECOM_DEV);
+	if (rc < 0) {
+		pr_err("alloc_chrdev_region failed %d\n", rc);
+		return rc;
+	}
+
+	driver_class = class_create(THIS_MODULE, QSEECOM_DEV);
+	if (IS_ERR(driver_class)) {
+		rc = -ENOMEM;
+		pr_err("class_create failed %d\n", rc);
+		goto exit_unreg_chrdev_region;
+	}
+
+	class_dev = device_create(driver_class, NULL, qseecom_device_no, NULL,
+			QSEECOM_DEV);
+	if (IS_ERR(class_dev)) {
+		rc = -ENOMEM;
+		pr_err("class_device_create failed %d\n", rc);
+		goto exit_destroy_class;
+	}
+
+	cdev_init(&qseecom.cdev, &qseecom_fops);
+	qseecom.cdev.owner = THIS_MODULE;
+
+	rc = cdev_add(&qseecom.cdev, MKDEV(MAJOR(qseecom_device_no), 0), 1);
+	if (rc < 0) {
+		pr_err("cdev_add failed %d\n", rc);
+		goto exit_destroy_device;
+	}
+
+	INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
+	spin_lock_init(&qseecom.registered_listener_list_lock);
+	INIT_LIST_HEAD(&qseecom.registered_app_list_head);
+	spin_lock_init(&qseecom.registered_app_list_lock);
+	INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
+	spin_lock_init(&qseecom.registered_kclient_list_lock);
+	init_waitqueue_head(&qseecom.send_resp_wq);
+	qseecom.send_resp_flag = 0;
+
+	qseecom.qsee_version = QSEEE_VERSION_00;
+	rc = qseecom_scm_call(6, 3, &feature, sizeof(feature),
+		&resp, sizeof(resp));
+	if (rc) {
+		pr_err("Failed to get QSEE version info %d\n", rc);
+		goto exit_del_cdev;
+	}
+	pr_info("qseecom.qsee_version = 0x%x\n", resp.result);
+	qseecom.qsee_version = resp.result;
+	qseecom.qseos_version = QSEOS_VERSION_14;
+	qseecom.commonlib_loaded = false;
+	qseecom.commonlib64_loaded = false;
+	qseecom.pdev = class_dev;
+	/* Create ION msm client */
+	qseecom.ion_clnt = msm_ion_client_create("qseecom-kernel");
+	if (IS_ERR_OR_NULL(qseecom.ion_clnt)) {
+		pr_err("Ion client cannot be created\n");
+		rc = -ENOMEM;
+		goto exit_del_cdev;
+	}
+
+	/* register client for bus scaling */
+	if (pdev->dev.of_node) {
+		qseecom.pdev->of_node = pdev->dev.of_node;
+		qseecom.support_bus_scaling =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,support-bus-scaling");
+		rc = qseecom_retrieve_ce_data(pdev);
+		if (rc)
+			goto exit_destroy_ion_client;
+		qseecom.appsbl_qseecom_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,appsbl-qseecom-support");
+		pr_debug("qseecom.appsbl_qseecom_support = 0x%x\n",
+				qseecom.appsbl_qseecom_support);
+
+		qseecom.commonlib64_loaded =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,commonlib64-loaded-by-uefi");
+		pr_debug("qseecom.commonlib64-loaded-by-uefi = 0x%x\n",
+				qseecom.commonlib64_loaded);
+		qseecom.fde_key_size =
+			of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,fde-key-size");
+		qseecom.no_clock_support =
+				of_property_read_bool((&pdev->dev)->of_node,
+						"qcom,no-clock-support");
+		if (qseecom.no_clock_support) {
+			pr_info("qseecom clocks handled by other subsystem\n");
+		} else {
+			pr_info("no-clock-support=0x%x\n",
+				qseecom.no_clock_support);
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+					"qcom,qsee-reentrancy-support",
+					&qseecom.qsee_reentrancy_support)) {
+			pr_warn("qsee reentrancy support phase is not defined, setting to default 0\n");
+			qseecom.qsee_reentrancy_support = 0;
+		} else {
+			pr_warn("qseecom.qsee_reentrancy_support = %d\n",
+				qseecom.qsee_reentrancy_support);
+		}
+
+		/*
+		 * The qseecom bus scaling flag cannot be enabled when the
+		 * crypto clock is not handled by HLOS.
+		 */
+		if (qseecom.no_clock_support && qseecom.support_bus_scaling) {
+			pr_err("support_bus_scaling flag can not be enabled.\n");
+			rc = -EINVAL;
+			goto exit_destroy_ion_client;
+		}
+
+		if (of_property_read_u32((&pdev->dev)->of_node,
+				"qcom,ce-opp-freq",
+				&qseecom.ce_opp_freq_hz)) {
+			pr_debug("CE operating frequency is not defined, setting to default 100MHz\n");
+			qseecom.ce_opp_freq_hz = QSEE_CE_CLK_100MHZ;
+		}
+		rc = __qseecom_init_clk(CLK_QSEE);
+		if (rc)
+			goto exit_destroy_ion_client;
+
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde)) {
+			rc = __qseecom_init_clk(CLK_CE_DRV);
+			if (rc) {
+				__qseecom_deinit_clk(CLK_QSEE);
+				goto exit_destroy_ion_client;
+			}
+		} else {
+			struct qseecom_clk *qclk;
+
+			qclk = &qseecom.qsee;
+			qseecom.ce_drv.ce_core_clk = qclk->ce_core_clk;
+			qseecom.ce_drv.ce_clk = qclk->ce_clk;
+			qseecom.ce_drv.ce_core_src_clk = qclk->ce_core_src_clk;
+			qseecom.ce_drv.ce_bus_clk = qclk->ce_bus_clk;
+		}
+
+		qseecom_platform_support = (struct msm_bus_scale_pdata *)
+						msm_bus_cl_get_pdata(pdev);
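+		/*
+		 * Notify TZ of the secure apps region (base/size) unless the
+		 * region is already protected or appsbl has already set up
+		 * qseecom; QSEE >= 4.0 uses the 64-bit request layout.
+		 */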
+		if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
+			(!qseecom.is_apps_region_protected &&
+			!qseecom.appsbl_qseecom_support)) {
+			struct resource *resource = NULL;
+			struct qsee_apps_region_info_ireq req;
+			struct qsee_apps_region_info_64bit_ireq req_64bit;
+			struct qseecom_command_scm_resp resp;
+			void *cmd_buf = NULL;
+			size_t cmd_len;
+
+			resource = platform_get_resource_byname(pdev,
+					IORESOURCE_MEM, "secapp-region");
+			if (resource) {
+				if (qseecom.qsee_version < QSEE_VERSION_40) {
+					req.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req.addr = (uint32_t)resource->start;
+					req.size = resource_size(resource);
+					cmd_buf = (void *)&req;
+					cmd_len = sizeof(struct
+						qsee_apps_region_info_ireq);
+					pr_warn("secure app region addr=0x%x size=0x%x\n",
+							req.addr, req.size);
+				} else {
+					req_64bit.qsee_cmd_id =
+						QSEOS_APP_REGION_NOTIFICATION;
+					req_64bit.addr = resource->start;
+					req_64bit.size = resource_size(
+							resource);
+					cmd_buf = (void *)&req_64bit;
+					cmd_len = sizeof(struct
+					qsee_apps_region_info_64bit_ireq);
+					pr_warn("secure app region addr=0x%llx size=0x%x\n",
+						req_64bit.addr, req_64bit.size);
+				}
+			} else {
+				pr_err("Fail to get secure app region info\n");
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+			rc = __qseecom_enable_clk(CLK_QSEE);
+			if (rc) {
+				pr_err("CLK_QSEE enabling failed (%d)\n", rc);
+				rc = -EIO;
+				goto exit_deinit_clock;
+			}
+			rc = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
+					cmd_buf, cmd_len,
+					&resp, sizeof(resp));
+			__qseecom_disable_clk(CLK_QSEE);
+			if (rc || (resp.result != QSEOS_RESULT_SUCCESS)) {
+				pr_err("send secapp reg fail %d resp.res %d\n",
+							rc, resp.result);
+				rc = -EINVAL;
+				goto exit_deinit_clock;
+			}
+		}
+		/*
+		 * By default, appsbl only loads cmnlib. If an OEM changes
+		 * appsbl to also load cmnlib64 while the cmnlib64 image is
+		 * not present in non_hlos.bin, please also add
+		 * "qseecom.commonlib64_loaded = true" here.
+		 */
+		if (qseecom.is_apps_region_protected ||
+					qseecom.appsbl_qseecom_support)
+			qseecom.commonlib_loaded = true;
+	} else {
+		qseecom_platform_support = (struct msm_bus_scale_pdata *)
+						pdev->dev.platform_data;
+	}
+	if (qseecom.support_bus_scaling) {
+		init_timer(&(qseecom.bw_scale_down_timer));
+		INIT_WORK(&qseecom.bw_inactive_req_ws,
+					qseecom_bw_inactive_req_work);
+		qseecom.bw_scale_down_timer.function =
+				qseecom_scale_bus_bandwidth_timer_callback;
+	}
+	qseecom.timer_running = false;
+	qseecom.qsee_perf_client = msm_bus_scale_register_client(
+					qseecom_platform_support);
+
+	qseecom.whitelist_support = qseecom_check_whitelist_feature();
+	pr_warn("qseecom.whitelist_support = %d\n",
+				qseecom.whitelist_support);
+
+	if (!qseecom.qsee_perf_client)
+		pr_err("Unable to register bus client\n");
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return 0;
+
+exit_deinit_clock:
+	__qseecom_deinit_clk(CLK_QSEE);
+	if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+		(qseecom.support_pfe || qseecom.support_fde))
+		__qseecom_deinit_clk(CLK_CE_DRV);
+exit_destroy_ion_client:
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.fde);
+	}
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			kzfree(pce_info_use->ce_pipe_entry);
+			pce_info_use++;
+		}
+		kfree(qseecom.ce_info.pfe);
+	}
+	ion_client_destroy(qseecom.ion_clnt);
+exit_del_cdev:
+	cdev_del(&qseecom.cdev);
+exit_destroy_device:
+	device_destroy(driver_class, qseecom_device_no);
+exit_destroy_class:
+	class_destroy(driver_class);
+exit_unreg_chrdev_region:
+	unregister_chrdev_region(qseecom_device_no, 1);
+	return rc;
+}
+
+static int qseecom_remove(struct platform_device *pdev)
+{
+	struct qseecom_registered_kclient_list *kclient = NULL;
+	struct qseecom_registered_kclient_list *kclient_tmp = NULL;
+	unsigned long flags = 0;
+	int ret = 0;
+	int i;
+	struct qseecom_ce_pipe_entry *pce_entry;
+	struct qseecom_ce_info_use *pce_info_use;
+
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_NOT_READY);
+	spin_lock_irqsave(&qseecom.registered_kclient_list_lock, flags);
+
+	/* Use the _safe iterator: entries may be deleted and freed below */
+	list_for_each_entry_safe(kclient, kclient_tmp,
+			&qseecom.registered_kclient_list_head, list) {
+		/* Break the loop if client handle is NULL */
+		if (!kclient->handle)
+			goto exit_free_kclient;
+
+		if (list_empty(&kclient->list))
+			goto exit_free_kc_handle;
+
+		list_del(&kclient->list);
+		mutex_lock(&app_access_lock);
+		ret = qseecom_unload_app(kclient->handle->dev, false);
+		mutex_unlock(&app_access_lock);
+		if (!ret) {
+			kzfree(kclient->handle->dev);
+			kzfree(kclient->handle);
+			kzfree(kclient);
+		}
+	}
+	/* Normal completion: do not fall through into the error labels */
+	goto exit_irqrestore;
+
+exit_free_kc_handle:
+	kzfree(kclient->handle);
+exit_free_kclient:
+	kzfree(kclient);
+exit_irqrestore:
+	spin_unlock_irqrestore(&qseecom.registered_kclient_list_lock, flags);
+
+	if (qseecom.qseos_version > QSEEE_VERSION_00)
+		qseecom_unload_commonlib_image();
+
+	if (qseecom.qsee_perf_client)
+		msm_bus_scale_client_update_request(qseecom.qsee_perf_client,
+									0);
+	if (pdev->dev.platform_data != NULL)
+		msm_bus_scale_unregister_client(qseecom.qsee_perf_client);
+
+	if (qseecom.support_bus_scaling) {
+		cancel_work_sync(&qseecom.bw_inactive_req_ws);
+		del_timer_sync(&qseecom.bw_scale_down_timer);
+	}
+
+	if (qseecom.ce_info.fde) {
+		pce_info_use = qseecom.ce_info.fde;
+		for (i = 0; i < qseecom.ce_info.num_fde; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.fde);
+	if (qseecom.ce_info.pfe) {
+		pce_info_use = qseecom.ce_info.pfe;
+		for (i = 0; i < qseecom.ce_info.num_pfe; i++) {
+			pce_entry = pce_info_use->ce_pipe_entry;
+			kfree(pce_entry);
+			pce_info_use++;
+		}
+	}
+	kfree(qseecom.ce_info.pfe);
+
+	/* register client for bus scaling */
+	if (pdev->dev.of_node) {
+		__qseecom_deinit_clk(CLK_QSEE);
+		if ((qseecom.qsee.instance != qseecom.ce_drv.instance) &&
+				(qseecom.support_pfe || qseecom.support_fde))
+			__qseecom_deinit_clk(CLK_CE_DRV);
+	}
+
+	ion_client_destroy(qseecom.ion_clnt);
+
+	cdev_del(&qseecom.cdev);
+
+	device_destroy(driver_class, qseecom_device_no);
+
+	class_destroy(driver_class);
+
+	unregister_chrdev_region(qseecom_device_no, 1);
+
+	return ret;
+}
+
+static int qseecom_suspend(struct platform_device *pdev, pm_message_t state)
+{
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_SUSPEND);
+	if (qseecom.no_clock_support)
+		return 0;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+
+	if (qseecom.current_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, INACTIVE);
+		if (ret)
+			pr_err("Fail to scale down bus\n");
+		else
+			qseecom.current_mode = INACTIVE;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_clk != NULL)
+			clk_disable_unprepare(qclk->ce_clk);
+		if (qclk->ce_core_clk != NULL)
+			clk_disable_unprepare(qclk->ce_core_clk);
+		if (qclk->ce_bus_clk != NULL)
+			clk_disable_unprepare(qclk->ce_bus_clk);
+	}
+
+	del_timer_sync(&(qseecom.bw_scale_down_timer));
+	qseecom.timer_running = false;
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	cancel_work_sync(&qseecom.bw_inactive_req_ws);
+
+	return 0;
+}
+
+static int qseecom_resume(struct platform_device *pdev)
+{
+	int mode = 0;
+	int ret = 0;
+	struct qseecom_clk *qclk;
+
+	qclk = &qseecom.qsee;
+	if (qseecom.no_clock_support)
+		goto exit;
+
+	mutex_lock(&qsee_bw_mutex);
+	mutex_lock(&clk_access_lock);
+	if (qseecom.cumulative_mode >= HIGH)
+		mode = HIGH;
+	else
+		mode = qseecom.cumulative_mode;
+
+	if (qseecom.cumulative_mode != INACTIVE) {
+		ret = msm_bus_scale_client_update_request(
+			qseecom.qsee_perf_client, mode);
+		if (ret)
+			pr_err("Fail to scale up bus to %d\n", mode);
+		else
+			qseecom.current_mode = mode;
+	}
+
+	if (qclk->clk_access_cnt) {
+		if (qclk->ce_core_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_core_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE core clk\n");
+				qclk->clk_access_cnt = 0;
+				goto err;
+			}
+		}
+		if (qclk->ce_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE iface clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_clk_err;
+			}
+		}
+		if (qclk->ce_bus_clk != NULL) {
+			ret = clk_prepare_enable(qclk->ce_bus_clk);
+			if (ret) {
+				pr_err("Unable to enable/prep CE bus clk\n");
+				qclk->clk_access_cnt = 0;
+				goto ce_bus_clk_err;
+			}
+		}
+	}
+
+	if (qclk->clk_access_cnt || qseecom.cumulative_mode) {
+		qseecom.bw_scale_down_timer.expires = jiffies +
+			msecs_to_jiffies(QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
+		mod_timer(&(qseecom.bw_scale_down_timer),
+				qseecom.bw_scale_down_timer.expires);
+		qseecom.timer_running = true;
+	}
+
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	goto exit;
+
+ce_bus_clk_err:
+	if (qclk->ce_clk)
+		clk_disable_unprepare(qclk->ce_clk);
+ce_clk_err:
+	if (qclk->ce_core_clk)
+		clk_disable_unprepare(qclk->ce_core_clk);
+err:
+	mutex_unlock(&clk_access_lock);
+	mutex_unlock(&qsee_bw_mutex);
+	ret = -EIO;
+exit:
+	atomic_set(&qseecom.qseecom_state, QSEECOM_STATE_READY);
+	return ret;
+}
+
+static const struct of_device_id qseecom_match[] = {
+	{
+		.compatible = "qcom,qseecom",
+	},
+	{}
+};
+
+static struct platform_driver qseecom_plat_driver = {
+	.probe = qseecom_probe,
+	.remove = qseecom_remove,
+	.suspend = qseecom_suspend,
+	.resume = qseecom_resume,
+	.driver = {
+		.name = "qseecom",
+		.owner = THIS_MODULE,
+		.of_match_table = qseecom_match,
+	},
+};
+
+static int qseecom_init(void)
+{
+	return platform_driver_register(&qseecom_plat_driver);
+}
+
+static void qseecom_exit(void)
+{
+	platform_driver_unregister(&qseecom_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("QTI Secure Execution Environment Communicator");
+
+module_init(qseecom_init);
+module_exit(qseecom_exit);
diff --git a/drivers/misc/qseecom_kernel.h b/drivers/misc/qseecom_kernel.h
new file mode 100644
index 0000000..5ca5839
--- /dev/null
+++ b/drivers/misc/qseecom_kernel.h
@@ -0,0 +1,44 @@
+/* Copyright (c) 2012-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOM_KERNEL_H_
+#define __QSEECOM_KERNEL_H_
+
+#include <linux/types.h>
+#include <soc/qcom/scm.h>
+
+#define QSEECOM_ALIGN_SIZE	0x40
+#define QSEECOM_ALIGN_MASK	(QSEECOM_ALIGN_SIZE - 1)
+#define QSEECOM_ALIGN(x)	\
+	(((x) + QSEECOM_ALIGN_MASK) & ~QSEECOM_ALIGN_MASK)
+
+/*
+ * struct qseecom_handle -
+ *      Handle to the qseecom device for kernel clients
+ * @dev - opaque handle used internally by the qseecom driver
+ * @sbuf - shared buffer pointer
+ * @sbuf_len - shared buffer size
+ */
+struct qseecom_handle {
+	void *dev; /* in/out */
+	unsigned char *sbuf; /* in/out */
+	uint32_t sbuf_len; /* in/out */
+};
+
+int qseecom_start_app(struct qseecom_handle **handle,
+						char *app_name, uint32_t size);
+int qseecom_shutdown_app(struct qseecom_handle **handle);
+int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
+			uint32_t sbuf_len, void *resp_buf, uint32_t rbuf_len);
+int qseecom_set_bandwidth(struct qseecom_handle *handle, bool high);
+int qseecom_process_listener_from_smcinvoke(struct scm_desc *desc);
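+
+/*
+ * Minimal usage sketch for a kernel client (illustrative only; error
+ * handling is trimmed and "sampleapp" is a hypothetical TA name). The
+ * request and response buffers live inside the shared buffer and their
+ * lengths must be QSEECOM_ALIGN()'ed:
+ *
+ *	struct qseecom_handle *handle = NULL;
+ *	uint32_t req_len = QSEECOM_ALIGN(64), rsp_len = QSEECOM_ALIGN(64);
+ *
+ *	if (qseecom_start_app(&handle, "sampleapp", req_len + rsp_len))
+ *		return -EIO;
+ *	qseecom_send_command(handle, handle->sbuf, req_len,
+ *			     handle->sbuf + req_len, rsp_len);
+ *	qseecom_shutdown_app(&handle);
+ */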
+
+#endif /* __QSEECOM_KERNEL_H_ */
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index e19d912..9551238 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -1823,6 +1823,7 @@
 	 * During a signal voltage level switch, the clock must be gated
 	 * for 5 ms according to the SD spec
 	 */
+	host->card_clock_off = true;
 	clock = host->ios.clock;
 	host->ios.clock = 0;
 	mmc_set_ios(host);
@@ -1833,6 +1834,9 @@
 		 * sent CMD11, so a power cycle is required anyway
 		 */
 		err = -EAGAIN;
+		host->ios.clock = clock;
+		mmc_set_ios(host);
+		host->card_clock_off = false;
 		goto power_cycle;
 	}
 
@@ -1841,6 +1845,7 @@
 	host->ios.clock = clock;
 	mmc_set_ios(host);
 
+	host->card_clock_off = false;
 	/* Wait for at least 1 ms according to spec */
 	mmc_delay(1);
 
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f18105f..e9f74a2 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -296,6 +296,10 @@
 {
 }
 
+bool mmc_host_may_gate_card(struct mmc_card *card)
+{
+	return false;
+}
 #endif
 
 void mmc_retune_enable(struct mmc_host *host)
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index b2fdb19..cf48a04 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2,7 +2,7 @@
  * drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
  * driver source file
  *
- * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -32,17 +32,33 @@
 #include <linux/scatterlist.h>
 #include <linux/slab.h>
 #include <linux/mmc/mmc.h>
+#include <linux/mmc/slot-gpio.h>
+#include <linux/dma-mapping.h>
+#include <linux/iopoll.h>
 #include <linux/msm-bus.h>
 
 #include "sdhci-pltfm.h"
 
 #define SDHCI_VER_100		0x2B
+
+#define CORE_VERSION_MAJOR_MASK		0xF0000000
+#define CORE_VERSION_MAJOR_SHIFT	28
+
 #define CORE_HC_MODE		0x78
 #define HC_MODE_EN		0x1
+#define FF_CLK_SW_RST_DIS	(1 << 13)
+
+#define CORE_GENERICS		0x70
+#define SWITCHABLE_SIGNALLING_VOL (1 << 29)
 
 #define CORE_POWER		0x0
 #define CORE_SW_RST		(1 << 7)
 
+#define CORE_MCI_VERSION	0x050
+#define CORE_TESTBUS_CONFIG	0x0CC
+#define CORE_TESTBUS_ENA	(1 << 3)
+#define CORE_SDCC_DEBUG_REG	0x124
+
 #define CORE_PWRCTL_STATUS	0xDC
 #define CORE_PWRCTL_MASK	0xE0
 #define CORE_PWRCTL_CLEAR	0xE4
@@ -61,25 +77,83 @@
 #define INT_MASK		0xF
 #define MAX_PHASES		16
 
-#define CORE_DLL_LOCK		(1 << 7)
+#define CORE_DLL_CONFIG		0x100
+#define CORE_CMD_DAT_TRACK_SEL	(1 << 0)
 #define CORE_DLL_EN		(1 << 16)
 #define CORE_CDR_EN		(1 << 17)
 #define CORE_CK_OUT_EN		(1 << 18)
 #define CORE_CDR_EXT_EN		(1 << 19)
 #define CORE_DLL_PDN		(1 << 29)
 #define CORE_DLL_RST		(1 << 30)
-#define CORE_DLL_CONFIG		0x100
-#define CORE_DLL_TEST_CTL	0x104
+
 #define CORE_DLL_STATUS		0x108
+#define CORE_DLL_LOCK		(1 << 7)
 
 #define CORE_VENDOR_SPEC	0x10C
 #define CORE_CLK_PWRSAVE	(1 << 1)
+#define CORE_HC_MCLK_SEL_DFLT	(2 << 8)
+#define CORE_HC_MCLK_SEL_HS400	(3 << 8)
+#define CORE_HC_MCLK_SEL_MASK	(3 << 8)
+#define CORE_HC_AUTO_CMD21_EN	(1 << 6)
 #define CORE_IO_PAD_PWR_SWITCH	(1 << 16)
+#define CORE_HC_SELECT_IN_EN	(1 << 18)
+#define CORE_HC_SELECT_IN_HS400	(6 << 19)
+#define CORE_HC_SELECT_IN_MASK	(7 << 19)
+
+#define CORE_VENDOR_SPEC_CAPABILITIES0	0x11C
+#define CORE_8_BIT_SUPPORT		(1 << 18)
+#define CORE_3_3V_SUPPORT		(1 << 24)
+#define CORE_3_0V_SUPPORT		(1 << 25)
+#define CORE_1_8V_SUPPORT		(1 << 26)
+#define CORE_SYS_BUS_SUPPORT_64_BIT	28
+
+#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR0	0x114
+#define CORE_VENDOR_SPEC_ADMA_ERR_ADDR1	0x118
+
+#define CORE_CSR_CDC_CTLR_CFG0		0x130
+#define CORE_SW_TRIG_FULL_CALIB		(1 << 16)
+#define CORE_HW_AUTOCAL_ENA		(1 << 17)
+
+#define CORE_CSR_CDC_CTLR_CFG1		0x134
+#define CORE_CSR_CDC_CAL_TIMER_CFG0	0x138
+#define CORE_TIMER_ENA			(1 << 16)
+
+#define CORE_CSR_CDC_CAL_TIMER_CFG1	0x13C
+#define CORE_CSR_CDC_REFCOUNT_CFG	0x140
+#define CORE_CSR_CDC_COARSE_CAL_CFG	0x144
+#define CORE_CDC_OFFSET_CFG		0x14C
+#define CORE_CSR_CDC_DELAY_CFG		0x150
+#define CORE_CDC_SLAVE_DDA_CFG		0x160
+#define CORE_CSR_CDC_STATUS0		0x164
+#define CORE_CALIBRATION_DONE		(1 << 0)
+
+#define CORE_CDC_ERROR_CODE_MASK	0x7000000
+
+#define CORE_CSR_CDC_GEN_CFG		0x178
+#define CORE_CDC_SWITCH_BYPASS_OFF	(1 << 0)
+#define CORE_CDC_SWITCH_RC_EN		(1 << 1)
+
+#define CORE_DDR_200_CFG		0x184
+#define CORE_CDC_T4_DLY_SEL		(1 << 0)
+#define CORE_START_CDC_TRAFFIC		(1 << 6)
+
+#define CORE_MCI_DATA_CNT 0x30
+#define CORE_MCI_STATUS 0x34
+#define CORE_MCI_FIFO_CNT 0x44
+
+#define CORE_TESTBUS_SEL2_BIT	4
+#define CORE_TESTBUS_SEL2	(1 << CORE_TESTBUS_SEL2_BIT)
 
 /* 8KB descriptors */
 #define SDHCI_MSM_MAX_SEGMENTS  (1 << 13)
 #define SDHCI_MSM_MMC_CLK_GATE_DELAY	200 /* msecs */
 
+#define CORE_FREQ_100MHZ	(100 * 1000 * 1000)
+
+#define INVALID_TUNING_PHASE	-1
+
+#define CORE_VERSION_TARGET_MASK	0x000000FF
+
 static const u32 tuning_block_64[] = {
 	0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
 	0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
@@ -98,6 +172,10 @@
 	0xFFFFBBBB, 0xFFFF77FF, 0xFF7777FF, 0xEEDDBB77
 };
 
+static int disable_slots;
+/* root can write, others read */
+module_param(disable_slots, int, S_IRUGO|S_IWUSR);
+
 /* This structure keeps information per regulator */
 struct sdhci_msm_reg_data {
 	/* voltage regulator handle */
@@ -165,12 +243,14 @@
 	u32 caps2;
 
 	unsigned long mmc_bus_width;
-	u32 max_clk;
 	struct sdhci_msm_slot_reg_data *vreg_data;
 	bool nonremovable;
 	struct sdhci_msm_pin_data *pin_data;
 	u32 cpu_dma_latency_us;
+	int status_gpio; /* card detection GPIO that is configured as IRQ */
 	struct sdhci_msm_bus_voting_data *voting_data;
+	u32 *sup_clk_table;
+	unsigned char sup_clk_cnt;
 };
 
 struct sdhci_msm_bus_vote {
@@ -189,12 +269,24 @@
 	struct clk	 *clk;     /* main SD/MMC bus clock */
 	struct clk	 *pclk;    /* SDHC peripheral bus clock */
 	struct clk	 *bus_clk; /* SDHC bus voter clock */
+	struct clk	 *ff_clk; /* CDC calibration fixed feedback clock */
+	struct clk	 *sleep_clk; /* CDC calibration sleep clock */
 	atomic_t clks_on; /* Set if clocks are enabled */
 	struct sdhci_msm_pltfm_data *pdata;
 	struct mmc_host  *mmc;
 	struct sdhci_pltfm_data sdhci_msm_pdata;
-	wait_queue_head_t pwr_irq_wait;
+	u32 curr_pwr_state;
+	u32 curr_io_level;
+	struct completion pwr_irq_completion;
 	struct sdhci_msm_bus_vote msm_bus_vote;
+	struct device_attribute	polling;
+	u32 clk_rate; /* Keeps track of current clock rate that is set */
+	bool tuning_done;
+	bool calibration_done;
+	u8 saved_tuning_phase;
+	bool en_auto_cmd21;
+	struct device_attribute auto_cmd21_attr;
+	atomic_t controller_clock;
 };
 
 enum vdd_io_level {
@@ -238,6 +330,93 @@
 	return rc;
 }
 
+/*
+ * Enable CDR to track changes of DAT lines and adjust sampling
+ * point according to voltage/temperature variations
+ */
+static int msm_enable_cdr_cm_sdc4_dll(struct sdhci_host *host)
+{
+	int rc = 0;
+	u32 config;
+
+	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG);
+	config |= CORE_CDR_EN;
+	config &= ~(CORE_CDR_EXT_EN | CORE_CK_OUT_EN);
+	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG);
+
+	rc = msm_dll_poll_ck_out_en(host, 0);
+	if (rc)
+		goto err;
+
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) |
+			CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG);
+
+	rc = msm_dll_poll_ck_out_en(host, 1);
+	if (rc)
+		goto err;
+	goto out;
+err:
+	pr_err("%s: %s: failed\n", mmc_hostname(host->mmc), __func__);
+out:
+	return rc;
+}
+
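+/* sysfs store/show handlers for en_auto_cmd21 (auto CMD21 in HS200 tuning) */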
+static ssize_t store_auto_cmd21(struct device *dev, struct device_attribute
+				*attr, const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u32 tmp;
+	unsigned long flags;
+
+	if (!kstrtou32(buf, 0, &tmp)) {
+		spin_lock_irqsave(&host->lock, flags);
+		msm_host->en_auto_cmd21 = !!tmp;
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
+
+static ssize_t show_auto_cmd21(struct device *dev,
+			       struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", msm_host->en_auto_cmd21);
+}
+
+/* MSM auto-tuning handler */
+static int sdhci_msm_config_auto_tuning_cmd(struct sdhci_host *host,
+					    bool enable,
+					    u32 type)
+{
+	int rc = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u32 val = 0;
+
+	if (!msm_host->en_auto_cmd21)
+		return 0;
+
+	if (type == MMC_SEND_TUNING_BLOCK_HS200)
+		val = CORE_HC_AUTO_CMD21_EN;
+	else
+		return 0;
+
+	if (enable) {
+		rc = msm_enable_cdr_cm_sdc4_dll(host);
+		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
+			       val, host->ioaddr + CORE_VENDOR_SPEC);
+	} else {
+		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
+			       ~val, host->ioaddr + CORE_VENDOR_SPEC);
+	}
+	return rc;
+}
+
 static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase)
 {
 	int rc = 0;
@@ -298,8 +477,8 @@
  * Find out the greatest range of consecuitive selected
  * DLL clock output phases that can be used as sampling
  * setting for SD3.0 UHS-I card read operation (in SDR104
- * timing mode) or for eMMC4.5 card read operation (in HS200
- * timing mode).
+ * timing mode) or for eMMC4.5 card read operation (in
+ * HS400/HS200 timing mode).
  * Select the 3/4 of the range and configure the DLL with the
  * selected DLL clock output phase.
  */
@@ -442,19 +621,25 @@
 	int rc = 0;
 	unsigned long flags;
 	u32 wait_cnt;
+	bool prev_pwrsave, curr_pwrsave;
 
 	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
 	spin_lock_irqsave(&host->lock, flags);
-
+	prev_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
+			  CORE_CLK_PWRSAVE);
+	curr_pwrsave = prev_pwrsave;
 	/*
 	 * Make sure that clock is always enabled when DLL
 	 * tuning is in progress. Keeping PWRSAVE ON may
 	 * turn off the clock. So let's disable the PWRSAVE
 	 * here and re-enable it once tuning is completed.
 	 */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
-			& ~CORE_CLK_PWRSAVE),
-			host->ioaddr + CORE_VENDOR_SPEC);
+	if (prev_pwrsave) {
+		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+				& ~CORE_CLK_PWRSAVE),
+				host->ioaddr + CORE_VENDOR_SPEC);
+		curr_pwrsave = false;
+	}
 
 	/* Write 1 to DLL_RST bit of DLL_CONFIG register */
 	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
@@ -497,26 +682,190 @@
 	}
 
 out:
-	/* re-enable PWRSAVE */
-	writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
-			CORE_CLK_PWRSAVE),
-			host->ioaddr + CORE_VENDOR_SPEC);
+	/* Restore the correct PWRSAVE state */
+	if (prev_pwrsave ^ curr_pwrsave) {
+		u32 reg = readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC);
+
+		if (prev_pwrsave)
+			reg |= CORE_CLK_PWRSAVE;
+		else
+			reg &= ~CORE_CLK_PWRSAVE;
+
+		writel_relaxed(reg, host->ioaddr + CORE_VENDOR_SPEC);
+	}
+
 	spin_unlock_irqrestore(&host->lock, flags);
 	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
 	return rc;
 }
 
+static int sdhci_msm_cdclp533_calibration(struct sdhci_host *host)
+{
+	u32 wait_cnt;
+	int ret = 0;
+	int cdc_err = 0;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	pr_debug("%s: Enter %s\n", mmc_hostname(host->mmc), __func__);
+
+	/*
+	 * Retuning in HS400 (DDR) mode would fail; instead, reset the
+	 * tuning block and restore the saved tuning phase.
+	 */
+	ret = msm_init_cm_dll(host);
+	if (ret)
+		goto out;
+
+	/* Set the selected phase in delay line hw block */
+	ret = msm_config_cm_dll_phase(host, msm_host->saved_tuning_phase);
+	if (ret)
+		goto out;
+
+	/* Write 1 to CMD_DAT_TRACK_SEL field in DLL_CONFIG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+			| CORE_CMD_DAT_TRACK_SEL),
+			host->ioaddr + CORE_DLL_CONFIG);
+
+	/* Write 0 to CDC_T4_DLY_SEL field in VENDOR_SPEC_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
+			& ~CORE_CDC_T4_DLY_SEL),
+			host->ioaddr + CORE_DDR_200_CFG);
+
+	/* Write 0 to CDC_SWITCH_BYPASS_OFF field in CORE_CSR_CDC_GEN_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+			& ~CORE_CDC_SWITCH_BYPASS_OFF),
+			host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+	/* Write 1 to CDC_SWITCH_RC_EN field in CORE_CSR_CDC_GEN_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_GEN_CFG)
+			| CORE_CDC_SWITCH_RC_EN),
+			host->ioaddr + CORE_CSR_CDC_GEN_CFG);
+
+	/* Write 0 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
+			& ~CORE_START_CDC_TRAFFIC),
+			host->ioaddr + CORE_DDR_200_CFG);
+
+	/*
+	 * Perform CDC Register Initialization Sequence
+	 *
+	 * CORE_CSR_CDC_CTLR_CFG0	0x11800EC
+	 * CORE_CSR_CDC_CTLR_CFG1	0x3011111
+	 * CORE_CSR_CDC_CAL_TIMER_CFG0	0x1201000
+	 * CORE_CSR_CDC_CAL_TIMER_CFG1	0x4
+	 * CORE_CSR_CDC_REFCOUNT_CFG	0xCB732020
+	 * CORE_CSR_CDC_COARSE_CAL_CFG	0xB19
+	 * CORE_CSR_CDC_DELAY_CFG	0x3AC
+	 * CORE_CDC_OFFSET_CFG		0x0
+	 * CORE_CDC_SLAVE_DDA_CFG	0x16334
+	 */
+
+	writel_relaxed(0x11800EC, host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+	writel_relaxed(0x3011111, host->ioaddr + CORE_CSR_CDC_CTLR_CFG1);
+	writel_relaxed(0x1201000, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+	writel_relaxed(0x4, host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG1);
+	writel_relaxed(0xCB732020, host->ioaddr + CORE_CSR_CDC_REFCOUNT_CFG);
+	writel_relaxed(0xB19, host->ioaddr + CORE_CSR_CDC_COARSE_CAL_CFG);
+	writel_relaxed(0x3AC, host->ioaddr + CORE_CSR_CDC_DELAY_CFG);
+	writel_relaxed(0x0, host->ioaddr + CORE_CDC_OFFSET_CFG);
+	writel_relaxed(0x16334, host->ioaddr + CORE_CDC_SLAVE_DDA_CFG);
+
+	/* CDC HW Calibration */
+
+	/* Write 1 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			| CORE_SW_TRIG_FULL_CALIB),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+	/* Write 0 to SW_TRIG_FULL_CALIB field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			& ~CORE_SW_TRIG_FULL_CALIB),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+	/* Write 1 to HW_AUTOCAL_ENA field in CORE_CSR_CDC_CTLR_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_CSR_CDC_CTLR_CFG0)
+			| CORE_HW_AUTOCAL_ENA),
+			host->ioaddr + CORE_CSR_CDC_CTLR_CFG0);
+
+	/* Write 1 to TIMER_ENA field in CORE_CSR_CDC_CAL_TIMER_CFG0 */
+	writel_relaxed((readl_relaxed(host->ioaddr +
+			CORE_CSR_CDC_CAL_TIMER_CFG0) | CORE_TIMER_ENA),
+			host->ioaddr + CORE_CSR_CDC_CAL_TIMER_CFG0);
+
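+	/* Order the CDC configuration writes before polling CALIBRATION_DONE */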
+	mb();
+
+	/* Poll on CALIBRATION_DONE field in CORE_CSR_CDC_STATUS0 to be 1 */
+	wait_cnt = 50;
+	while (!(readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+			& CORE_CALIBRATION_DONE)) {
+		/* max. wait of 50 us for CALIBRATION_DONE bit to be set */
+		if (--wait_cnt == 0) {
+			pr_err("%s: %s: CDC Calibration was not completed\n",
+				mmc_hostname(host->mmc), __func__);
+			ret = -ETIMEDOUT;
+			goto out;
+		}
+		/* wait for 1us before polling again */
+		udelay(1);
+	}
+
+	/* Verify CDC_ERROR_CODE field in CORE_CSR_CDC_STATUS0 is 0 */
+	cdc_err = readl_relaxed(host->ioaddr + CORE_CSR_CDC_STATUS0)
+			& CORE_CDC_ERROR_CODE_MASK;
+	if (cdc_err) {
+		pr_err("%s: %s: CDC Error Code %d\n",
+			mmc_hostname(host->mmc), __func__, cdc_err);
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Write 1 to START_CDC_TRAFFIC field in CORE_DDR200_CFG */
+	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DDR_200_CFG)
+			| CORE_START_CDC_TRAFFIC),
+			host->ioaddr + CORE_DDR_200_CFG);
+out:
+	pr_debug("%s: Exit %s, ret:%d\n", mmc_hostname(host->mmc),
+			__func__, ret);
+	return ret;
+}
+
 int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode)
 {
 	unsigned long flags;
+	int tuning_seq_cnt = 3;
 	u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0;
 	const u32 *tuning_block_pattern = tuning_block_64;
 	int size = sizeof(tuning_block_64); /* Tuning pattern size in bytes */
 	int rc;
 	struct mmc_host *mmc = host->mmc;
+	struct mmc_ios	ios = host->mmc->ios;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
+	/*
+	 * Tuning is required only in SDR104, HS200 and HS400 modes, and
+	 * only when the clock frequency is greater than 100MHz.
+	 */
+	if (host->clock <= CORE_FREQ_100MHZ ||
+		!((ios.timing == MMC_TIMING_MMC_HS400) ||
+		(ios.timing == MMC_TIMING_MMC_HS200) ||
+		(ios.timing == MMC_TIMING_UHS_SDR104)))
+		return 0;
 
 	pr_debug("%s: Enter %s\n", mmc_hostname(mmc), __func__);
-	/* Tuning is only required for SDR104 modes */
+
+	/* CDCLP533 HW calibration is only required for HS400 mode */
+	if (msm_host->tuning_done && !msm_host->calibration_done &&
+		(mmc->ios.timing == MMC_TIMING_MMC_HS400)) {
+		rc = sdhci_msm_cdclp533_calibration(host);
+		spin_lock_irqsave(&host->lock, flags);
+		if (!rc)
+			msm_host->calibration_done = true;
+		spin_unlock_irqrestore(&host->lock, flags);
+		goto out;
+	}
+
 	spin_lock_irqsave(&host->lock, flags);
 
 	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) &&
@@ -526,17 +875,18 @@
 	}
 	spin_unlock_irqrestore(&host->lock, flags);
 
-	/* first of all reset the tuning block */
-	rc = msm_init_cm_dll(host);
-	if (rc)
-		goto out;
-
 	data_buf = kmalloc(size, GFP_KERNEL);
 	if (!data_buf) {
 		rc = -ENOMEM;
 		goto out;
 	}
 
+retry:
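+	/* Retry the full tuning sequence up to tuning_seq_cnt (3) attempts */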
+	/* first of all reset the tuning block */
+	rc = msm_init_cm_dll(host);
+	if (rc)
+		goto kfree;
+
 	phase = 0;
 	do {
 		struct mmc_command cmd = {0};
@@ -590,19 +940,26 @@
 		rc = msm_config_cm_dll_phase(host, phase);
 		if (rc)
 			goto kfree;
+		msm_host->saved_tuning_phase = phase;
 		pr_debug("%s: %s: finally setting the tuning phase to %d\n",
 				mmc_hostname(mmc), __func__, phase);
 	} else {
+		if (--tuning_seq_cnt)
+			goto retry;
 		/* tuning failed */
 		pr_err("%s: %s: no tuning point found\n",
 			mmc_hostname(mmc), __func__);
-		rc = -EAGAIN;
+		rc = -EIO;
 	}
 
 kfree:
 	kfree(data_buf);
 out:
-	pr_debug("%s: Exit %s\n", mmc_hostname(mmc), __func__);
+	spin_lock_irqsave(&host->lock, flags);
+	if (!rc)
+		msm_host->tuning_done = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+	pr_debug("%s: Exit %s, err(%d)\n", mmc_hostname(mmc), __func__, rc);
 	return rc;
 }
 
@@ -671,7 +1028,7 @@
 		goto out;
 	}
 	sz = *len = *len / sizeof(*arr);
-	if (sz <= 0 || (size > 0 && (sz != size))) {
+	if (sz <= 0 || (size > 0 && (sz > size))) {
 		dev_err(dev, "%s invalid size\n", prop_name);
 		ret = -EINVAL;
 		goto out;
@@ -708,8 +1065,7 @@
 
 	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", vreg_name);
 	if (!of_parse_phandle(np, prop_name, 0)) {
-		dev_err(dev, "No vreg data found for %s\n", vreg_name);
-		ret = -EINVAL;
+		dev_info(dev, "No vreg data found for %s\n", vreg_name);
 		return ret;
 	}
 
@@ -828,6 +1184,9 @@
 	u32 bus_width = 0;
 	u32 cpu_dma_latency;
 	int len, i;
+	int clk_table_len;
+	u32 *clk_table = NULL;
+	enum of_gpio_flags flags = OF_GPIO_ACTIVE_LOW;
 
 	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
 	if (!pdata) {
@@ -835,6 +1194,10 @@
 		goto out;
 	}
 
+	pdata->status_gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags);
+	if (gpio_is_valid(pdata->status_gpio) && !(flags & OF_GPIO_ACTIVE_LOW))
+		pdata->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+
 	of_property_read_u32(np, "qcom,bus-width", &bus_width);
 	if (bus_width == 8)
 		pdata->mmc_bus_width = MMC_CAP_8_BIT_DATA;
@@ -849,6 +1212,18 @@
 				&cpu_dma_latency))
 		pdata->cpu_dma_latency_us = cpu_dma_latency;
 
+	if (sdhci_msm_dt_get_array(dev, "qcom,clk-rates",
+			&clk_table, &clk_table_len, 0)) {
+		dev_err(dev, "failed parsing supported clock rates\n");
+		goto out;
+	}
+	if (!clk_table || !clk_table_len) {
+		dev_err(dev, "Invalid clock table\n");
+		goto out;
+	}
+	pdata->sup_clk_table = clk_table;
+	pdata->sup_clk_cnt = clk_table_len;
+
 	pdata->vreg_data = devm_kzalloc(dev, sizeof(struct
 						    sdhci_msm_slot_reg_data),
 					GFP_KERNEL);
@@ -874,8 +1249,6 @@
 		goto out;
 	}
 
-	of_property_read_u32(np, "qcom,max-clk-rate", &pdata->max_clk);
-
 	len = of_property_count_strings(np, "qcom,bus-speed-mode");
 
 	for (i = 0; i < len; i++) {
@@ -886,7 +1259,11 @@
 		if (!name)
 			continue;
 
-		if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
+		if (!strncmp(name, "HS400_1p8v", sizeof("HS400_1p8v")))
+			pdata->caps2 |= MMC_CAP2_HS400_1_8V;
+		else if (!strncmp(name, "HS400_1p2v", sizeof("HS400_1p2v")))
+			pdata->caps2 |= MMC_CAP2_HS400_1_2V;
+		else if (!strncmp(name, "HS200_1p8v", sizeof("HS200_1p8v")))
 			pdata->caps2 |= MMC_CAP2_HS200_1_8V_SDR;
 		else if (!strncmp(name, "HS200_1p2v", sizeof("HS200_1p2v")))
 			pdata->caps2 |= MMC_CAP2_HS200_1_2V_SDR;
@@ -910,9 +1287,12 @@
 static unsigned int sdhci_get_bw_required(struct sdhci_host *host,
 					struct mmc_ios *ios)
 {
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+
 	unsigned int bw;
 
-	bw = host->clock;
+	bw = msm_host->clk_rate;
 	/*
 	 * For DDR mode, SDCC controller clock will be at
 	 * the double rate than the actual clock that goes to card.
@@ -953,16 +1333,18 @@
  */
 static inline int sdhci_msm_bus_set_vote(struct sdhci_msm_host *msm_host,
 					     int vote,
-					     unsigned long flags)
+					     unsigned long *flags)
 {
 	struct sdhci_host *host =  platform_get_drvdata(msm_host->pdev);
 	int rc = 0;
 
+	BUG_ON(!flags);
+
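+	/*
+	 * 'flags' is passed by pointer because the host lock is dropped and
+	 * re-acquired across the bus-scale call: spin_lock_irqsave() rewrites
+	 * the saved irq state, so the caller's copy must stay current.
+	 */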
 	if (vote != msm_host->msm_bus_vote.curr_vote) {
-		spin_unlock_irqrestore(&host->lock, flags);
+		spin_unlock_irqrestore(&host->lock, *flags);
 		rc = msm_bus_scale_client_update_request(
 				msm_host->msm_bus_vote.client_handle, vote);
-		spin_lock_irqsave(&host->lock, flags);
+		spin_lock_irqsave(&host->lock, *flags);
 		if (rc) {
 			pr_err("%s: msm_bus_scale_client_update_request() failed: bus_client_handle=0x%x, vote=%d, err=%d\n",
 				mmc_hostname(host->mmc),
@@ -995,7 +1377,7 @@
 	/* don't vote for 0 bandwidth if any request is in progress */
 	if (!host->mrq) {
 		sdhci_msm_bus_set_vote(msm_host,
-			msm_host->msm_bus_vote.min_bw_vote, flags);
+			msm_host->msm_bus_vote.min_bw_vote, &flags);
 	} else
 		pr_warning("%s: %s: Transfer in progress. skipping bus voting to 0 bandwidth\n",
 			   mmc_hostname(host->mmc), __func__);
@@ -1017,7 +1399,7 @@
 	cancel_delayed_work_sync(&msm_host->msm_bus_vote.vote_work);
 	spin_lock_irqsave(&host->lock, flags);
 	vote = sdhci_msm_bus_get_vote_for_bw(msm_host, bw);
-	sdhci_msm_bus_set_vote(msm_host, vote, flags);
+	sdhci_msm_bus_set_vote(msm_host, vote, &flags);
 	spin_unlock_irqrestore(&host->lock, flags);
 }
 
@@ -1112,10 +1494,22 @@
 		return;
 
 	bw = sdhci_get_bw_required(host, ios);
-	if (enable)
+	if (enable) {
 		sdhci_msm_bus_cancel_work_and_set_vote(host, bw);
-	else
-		sdhci_msm_bus_queue_work(host);
+	} else {
+		/*
+		 * If clock gating is enabled, then remove the vote
+		 * immediately because clocks will be disabled only
+		 * after SDHCI_MSM_MMC_CLK_GATE_DELAY and thus no
+		 * additional delay is required to remove the bus vote.
+		 */
+#ifdef CONFIG_MMC_CLKGATE
+		if (host->mmc->clkgate_delay)
+			sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+		else
+#endif
+			sdhci_msm_bus_queue_work(host);
+	}
 }
 
 /* Regulator utility functions */
@@ -1137,11 +1531,14 @@
 		goto out;
 	}
 
-	/* sanity check */
-	if (!vreg->high_vol_level || !vreg->hpm_uA) {
-		pr_err("%s: %s invalid constraints specified\n",
-		       __func__, vreg->name);
-		ret = -EINVAL;
+	if (regulator_count_voltages(vreg->reg) > 0) {
+		vreg->set_voltage_sup = true;
+		/* sanity check */
+		if (!vreg->high_vol_level || !vreg->hpm_uA) {
+			pr_err("%s: %s invalid constraints specified\n",
+			       __func__, vreg->name);
+			ret = -EINVAL;
+		}
 	}
 
 out:
@@ -1182,12 +1579,13 @@
 					int min_uV, int max_uV)
 {
 	int ret = 0;
-
-	ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
-	if (ret) {
-		pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
+	if (vreg->set_voltage_sup) {
+		ret = regulator_set_voltage(vreg->reg, min_uV, max_uV);
+		if (ret) {
+			pr_err("%s: regulator_set_voltage(%s)failed. min_uV=%d,max_uV=%d,ret=%d\n",
 			       __func__, vreg->name, min_uV, max_uV, ret);
 		}
+	}
 
 	return ret;
 }
@@ -1392,6 +1790,8 @@
 	u8 irq_status = 0;
 	u8 irq_ack = 0;
 	int ret = 0;
+	int pwr_state = 0, io_level = 0;
+	unsigned long flags;
 
 	irq_status = readb_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
 	pr_debug("%s: Received IRQ(%d), status=0x%x\n",
@@ -1410,21 +1810,33 @@
 	/* Handle BUS ON/OFF*/
 	if (irq_status & CORE_PWRCTL_BUS_ON) {
 		ret = sdhci_msm_setup_vreg(msm_host->pdata, true, false);
-		if (!ret)
+		if (!ret) {
 			ret = sdhci_msm_setup_pins(msm_host->pdata, true);
+			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+					VDD_IO_HIGH, 0);
+		}
 		if (ret)
 			irq_ack |= CORE_PWRCTL_BUS_FAIL;
 		else
 			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+		pwr_state = REQ_BUS_ON;
+		io_level = REQ_IO_HIGH;
 	}
 	if (irq_status & CORE_PWRCTL_BUS_OFF) {
 		ret = sdhci_msm_setup_vreg(msm_host->pdata, false, false);
-		if (!ret)
+		if (!ret) {
 			ret = sdhci_msm_setup_pins(msm_host->pdata, false);
+			ret |= sdhci_msm_set_vdd_io_vol(msm_host->pdata,
+					VDD_IO_LOW, 0);
+		}
 		if (ret)
 			irq_ack |= CORE_PWRCTL_BUS_FAIL;
 		else
 			irq_ack |= CORE_PWRCTL_BUS_SUCCESS;
+
+		pwr_state = REQ_BUS_OFF;
+		io_level = REQ_IO_LOW;
 	}
 	/* Handle IO LOW/HIGH */
 	if (irq_status & CORE_PWRCTL_IO_LOW) {
@@ -1434,6 +1846,8 @@
 			irq_ack |= CORE_PWRCTL_IO_FAIL;
 		else
 			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+		io_level = REQ_IO_LOW;
 	}
 	if (irq_status & CORE_PWRCTL_IO_HIGH) {
 		/* Switch voltage High */
@@ -1442,6 +1856,8 @@
 			irq_ack |= CORE_PWRCTL_IO_FAIL;
 		else
 			irq_ack |= CORE_PWRCTL_IO_SUCCESS;
+
+		io_level = REQ_IO_HIGH;
 	}
 
 	/* ACK status to the core */
@@ -1454,11 +1870,11 @@
 	 */
 	mb();
 
-	if (irq_status & CORE_PWRCTL_IO_HIGH)
+	if (io_level & REQ_IO_HIGH)
 		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
 				~CORE_IO_PAD_PWR_SWITCH),
 				host->ioaddr + CORE_VENDOR_SPEC);
-	if (irq_status & CORE_PWRCTL_IO_LOW)
+	else if (io_level & REQ_IO_LOW)
 		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) |
 				CORE_IO_PAD_PWR_SWITCH),
 				host->ioaddr + CORE_VENDOR_SPEC);
@@ -1466,11 +1882,53 @@
 
 	pr_debug("%s: Handled IRQ(%d), ret=%d, ack=0x%x\n",
 		mmc_hostname(msm_host->mmc), irq, ret, irq_ack);
-	wake_up_interruptible(&msm_host->pwr_irq_wait);
+	spin_lock_irqsave(&host->lock, flags);
+	if (pwr_state)
+		msm_host->curr_pwr_state = pwr_state;
+	if (io_level)
+		msm_host->curr_io_level = io_level;
+	complete(&msm_host->pwr_irq_completion);
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	return IRQ_HANDLED;
 }
 
 static ssize_t
+show_polling(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	int poll;
+	unsigned long flags;
+
+	spin_lock_irqsave(&host->lock, flags);
+	poll = !!(host->mmc->caps & MMC_CAP_NEEDS_POLL);
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", poll);
+}
+
+static ssize_t
+store_polling(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	u32 value;
+	unsigned long flags;
+
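+	/*
+	 * A non-zero value switches the slot to polling-based card detect
+	 * and triggers an immediate rescan; zero turns polling back off.
+	 */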
+	if (!kstrtou32(buf, 0, &value)) {
+		spin_lock_irqsave(&host->lock, flags);
+		if (value) {
+			host->mmc->caps |= MMC_CAP_NEEDS_POLL;
+			mmc_detect_change(host->mmc, 0);
+		} else {
+			host->mmc->caps &= ~MMC_CAP_NEEDS_POLL;
+		}
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
+}
+
+static ssize_t
 show_sdhci_max_bus_bw(struct device *dev, struct device_attribute *attr,
 			char *buf)
 {
@@ -1500,25 +1958,61 @@
 	return count;
 }
 
-static void sdhci_msm_check_power_status(struct sdhci_host *host)
+static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
 {
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
-	int ret = 0;
+	unsigned long flags;
+	bool done = false;
+	u32 io_sig_sts;
 
-	pr_debug("%s: %s: power status before waiting 0x%x\n",
-		mmc_hostname(host->mmc), __func__,
-		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+	spin_lock_irqsave(&host->lock, flags);
+	pr_debug("%s: %s: request %d curr_pwr_state %x curr_io_level %x\n",
+			mmc_hostname(host->mmc), __func__, req_type,
+			msm_host->curr_pwr_state, msm_host->curr_io_level);
+	io_sig_sts = readl_relaxed(msm_host->core_mem + CORE_GENERICS);
+	/*
+	 * The IRQ for request type IO High/Low will be generated when -
+	 * 1. SWITCHABLE_SIGNALLING_VOL is enabled in HW.
+	 * 2. If (1) is true and there is a state change in the 1.8V enable
+	 * bit (bit 3) of the SDHCI_HOST_CONTROL2 register. The reset state of
+	 * that bit is 0 which indicates 3.3V IO voltage. So, when MMC core
+	 * layer tries to set it to 3.3V before card detection happens, the
+	 * IRQ doesn't get triggered as there is no state change in this bit.
+	 * The driver already handles this case by changing the IO voltage
+	 * level to high as part of controller power up sequence. Hence, check
+	 * for host->pwr to handle a case where IO voltage high request is
+	 * issued even before controller power up.
+	 */
+	if (req_type & (REQ_IO_HIGH | REQ_IO_LOW)) {
+		if (!(io_sig_sts & SWITCHABLE_SIGNALLING_VOL) ||
+				((req_type & REQ_IO_HIGH) && !host->pwr)) {
+			pr_debug("%s: do not wait for power IRQ that never comes\n",
+					mmc_hostname(host->mmc));
+			spin_unlock_irqrestore(&host->lock, flags);
+			return;
+		}
+	}
 
-	ret = wait_event_interruptible(msm_host->pwr_irq_wait,
-				       (readb_relaxed(msm_host->core_mem +
-						      CORE_PWRCTL_CTL)) != 0x0);
-	if (ret)
-		pr_warning("%s: %s: returned due to error %d\n",
-				mmc_hostname(host->mmc), __func__, ret);
-	pr_debug("%s: %s: ret %d power status after handling power IRQ 0x%x\n",
-		mmc_hostname(host->mmc), __func__, ret,
-		readb_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL));
+	if ((req_type & msm_host->curr_pwr_state) ||
+			(req_type & msm_host->curr_io_level))
+		done = true;
+	spin_unlock_irqrestore(&host->lock, flags);
+
+	/*
+	 * This is needed here to handle a case where the IRQ has been
+	 * triggered even before this function is called, so that the
+	 * x->done counter of the completion gets reset. Otherwise, the
+	 * next call to wait_for_completion() returns immediately
+	 * without actually waiting for the IRQ to be handled.
+	 */
+	if (done)
+		init_completion(&msm_host->pwr_irq_completion);
+	else
+		wait_for_completion(&msm_host->pwr_irq_completion);
+
+	pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
+			__func__, req_type);
 }
 
 static void sdhci_msm_toggle_cdr(struct sdhci_host *host, bool enable)
@@ -1538,76 +2032,485 @@
 	return SDHCI_MSM_MAX_SEGMENTS;
 }
 
-void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+static unsigned int sdhci_msm_get_min_clock(struct sdhci_host *host)
 {
-	int rc;
 	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
 	struct sdhci_msm_host *msm_host = pltfm_host->priv;
-	unsigned long flags;
 
-	if (clock && !atomic_read(&msm_host->clks_on)) {
-		pr_debug("%s: request to enable clock at rate %u\n",
-				mmc_hostname(host->mmc), clock);
+	return msm_host->pdata->sup_clk_table[0];
+}
+
+static unsigned int sdhci_msm_get_max_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int max_clk_index = msm_host->pdata->sup_clk_cnt;
+
+	return msm_host->pdata->sup_clk_table[max_clk_index - 1];
+}
+
+static unsigned int sdhci_msm_get_sup_clk_rate(struct sdhci_host *host,
+						u32 req_clk)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	unsigned int sel_clk = -1;
+	unsigned char cnt;
+
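+	/*
+	 * Pick the highest rate in sup_clk_table that does not exceed
+	 * req_clk; the table is assumed to be sorted in ascending order
+	 * (as sdhci_msm_get_min/max_clock also expect). Requests below
+	 * the table fall back to the minimum supported rate.
+	 */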
+	if (req_clk < sdhci_msm_get_min_clock(host)) {
+		sel_clk = sdhci_msm_get_min_clock(host);
+		return sel_clk;
+	}
+
+	for (cnt = 0; cnt < msm_host->pdata->sup_clk_cnt; cnt++) {
+		if (msm_host->pdata->sup_clk_table[cnt] > req_clk) {
+			break;
+		} else if (msm_host->pdata->sup_clk_table[cnt] == req_clk) {
+			sel_clk = msm_host->pdata->sup_clk_table[cnt];
+			break;
+		} else {
+			sel_clk = msm_host->pdata->sup_clk_table[cnt];
+		}
+	}
+	return sel_clk;
+}
+
+static int sdhci_msm_enable_controller_clock(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int rc = 0;
+
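+	/*
+	 * controller_clock covers only the iface/core clocks needed for
+	 * register access; the optional bus/ff/sleep clocks are brought up
+	 * by sdhci_msm_prepare_clocks() together with clks_on.
+	 */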
+	if (atomic_read(&msm_host->controller_clock))
+		return 0;
+
+	sdhci_msm_bus_voting(host, 1);
+
+	if (!IS_ERR(msm_host->pclk)) {
+		rc = clk_prepare_enable(msm_host->pclk);
+		if (rc) {
+			pr_err("%s: %s: failed to enable the pclk with error %d\n",
+			       mmc_hostname(host->mmc), __func__, rc);
+			goto remove_vote;
+		}
+	}
+
+	rc = clk_prepare_enable(msm_host->clk);
+	if (rc) {
+		pr_err("%s: %s: failed to enable the host-clk with error %d\n",
+		       mmc_hostname(host->mmc), __func__, rc);
+		goto disable_pclk;
+	}
+
+	atomic_set(&msm_host->controller_clock, 1);
+	pr_debug("%s: %s: enabled controller clock\n",
+			mmc_hostname(host->mmc), __func__);
+	goto out;
+
+disable_pclk:
+	if (!IS_ERR(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
+remove_vote:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+out:
+	return rc;
+}
+
+
+
+static int sdhci_msm_prepare_clocks(struct sdhci_host *host, bool enable)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int rc = 0;
+
+	if (enable && !atomic_read(&msm_host->clks_on)) {
+		pr_debug("%s: request to enable clocks\n",
+				mmc_hostname(host->mmc));
+
+		/*
+		 * The bus-width or the clock rate might have changed
+		 * after the controller clocks were enabled; update the bus
+		 * vote in that case.
+		 */
+		if (atomic_read(&msm_host->controller_clock))
+			sdhci_msm_bus_voting(host, 1);
+
+		rc = sdhci_msm_enable_controller_clock(host);
+		if (rc)
+			goto remove_vote;
+
 		if (!IS_ERR_OR_NULL(msm_host->bus_clk)) {
 			rc = clk_prepare_enable(msm_host->bus_clk);
 			if (rc) {
 				pr_err("%s: %s: failed to enable the bus-clock with error %d\n",
 					mmc_hostname(host->mmc), __func__, rc);
-				goto out;
+				goto disable_controller_clk;
 			}
 		}
-		if (!IS_ERR(msm_host->pclk)) {
-			rc = clk_prepare_enable(msm_host->pclk);
+		if (!IS_ERR(msm_host->ff_clk)) {
+			rc = clk_prepare_enable(msm_host->ff_clk);
 			if (rc) {
-				pr_err("%s: %s: failed to enable the pclk with error %d\n",
+				pr_err("%s: %s: failed to enable the ff_clk with error %d\n",
 					mmc_hostname(host->mmc), __func__, rc);
 				goto disable_bus_clk;
 			}
 		}
-		rc = clk_prepare_enable(msm_host->clk);
-		if (rc) {
-			pr_err("%s: %s: failed to enable the host-clk with error %d\n",
-				mmc_hostname(host->mmc), __func__, rc);
-			goto disable_pclk;
+		if (!IS_ERR(msm_host->sleep_clk)) {
+			rc = clk_prepare_enable(msm_host->sleep_clk);
+			if (rc) {
+				pr_err("%s: %s: failed to enable the sleep_clk with error %d\n",
+					mmc_hostname(host->mmc), __func__, rc);
+				goto disable_ff_clk;
+			}
 		}
 		mb();
-		atomic_set(&msm_host->clks_on, 1);
 
-	} else if (!clock && atomic_read(&msm_host->clks_on)) {
-		pr_debug("%s: request to disable clocks\n",
-				mmc_hostname(host->mmc));
+	} else if (!enable && atomic_read(&msm_host->clks_on)) {
 		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
 		mb();
+		/*
+		 * During 1.8V signal switching the clock source must
+		 * still be ON as it requires accessing SDHC
+		 * registers (SDHCI HOST_CONTROL2 register bit 3 must
+		 * be written and polled after stopping the SDCLK).
+		 */
+		if (host->mmc->card_clock_off)
+			return 0;
+		pr_debug("%s: request to disable clocks\n",
+				mmc_hostname(host->mmc));
+		if (!IS_ERR_OR_NULL(msm_host->sleep_clk))
+			clk_disable_unprepare(msm_host->sleep_clk);
+		if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+			clk_disable_unprepare(msm_host->ff_clk);
 		clk_disable_unprepare(msm_host->clk);
 		if (!IS_ERR(msm_host->pclk))
 			clk_disable_unprepare(msm_host->pclk);
 		if (!IS_ERR_OR_NULL(msm_host->bus_clk))
 			clk_disable_unprepare(msm_host->bus_clk);
-		atomic_set(&msm_host->clks_on, 0);
+
+		atomic_set(&msm_host->controller_clock, 0);
+		sdhci_msm_bus_voting(host, 0);
 	}
-	spin_lock_irqsave(&host->lock, flags);
-	host->clock = clock;
-	spin_unlock_irqrestore(&host->lock, flags);
+	atomic_set(&msm_host->clks_on, enable);
 	goto out;
-disable_pclk:
-	if (!IS_ERR_OR_NULL(msm_host->pclk))
-		clk_disable_unprepare(msm_host->pclk);
+disable_ff_clk:
+	if (!IS_ERR_OR_NULL(msm_host->ff_clk))
+		clk_disable_unprepare(msm_host->ff_clk);
 disable_bus_clk:
 	if (!IS_ERR_OR_NULL(msm_host->bus_clk))
 		clk_disable_unprepare(msm_host->bus_clk);
+disable_controller_clk:
+	if (!IS_ERR_OR_NULL(msm_host->clk))
+		clk_disable_unprepare(msm_host->clk);
+	if (!IS_ERR_OR_NULL(msm_host->pclk))
+		clk_disable_unprepare(msm_host->pclk);
+	atomic_set(&msm_host->controller_clock, 0);
+remove_vote:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
 out:
-	return;
+	return rc;
+}
+
+static void sdhci_msm_set_clock(struct sdhci_host *host, unsigned int clock)
+{
+	int rc;
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	struct mmc_ios	curr_ios = host->mmc->ios;
+	u32 sup_clock, ddr_clock;
+	bool curr_pwrsave;
+
+	if (!clock) {
+		/*
+		 * disable pwrsave to ensure clock is not auto-gated until
+		 * the rate is >400KHz (initialization complete).
+		 */
+		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
+			~CORE_CLK_PWRSAVE, host->ioaddr + CORE_VENDOR_SPEC);
+		sdhci_msm_prepare_clocks(host, false);
+		host->clock = clock;
+		goto out;
+	}
+
+	rc = sdhci_msm_prepare_clocks(host, true);
+	if (rc)
+		goto out;
+
+	curr_pwrsave = !!(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) &
+			  CORE_CLK_PWRSAVE);
+	if ((clock > 400000) &&
+	    !curr_pwrsave && mmc_host_may_gate_card(host->mmc->card))
+		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+				| CORE_CLK_PWRSAVE,
+				host->ioaddr + CORE_VENDOR_SPEC);
+	/*
+	 * Disable pwrsave for a newly added card if it doesn't allow
+	 * clock gating.
+	 */
+	else if (curr_pwrsave && !mmc_host_may_gate_card(host->mmc->card))
+		writel_relaxed(readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+				& ~CORE_CLK_PWRSAVE,
+				host->ioaddr + CORE_VENDOR_SPEC);
+
+	sup_clock = sdhci_msm_get_sup_clk_rate(host, clock);
+	if ((curr_ios.timing == MMC_TIMING_UHS_DDR50) ||
+		(curr_ios.timing == MMC_TIMING_MMC_HS400)) {
+		/*
+		 * The SDHC requires the internal clock frequency to be double
+		 * the actual clock that will be set for DDR mode. The
+		 * controller uses the faster clock (100/400MHz) for some of
+		 * its parts and sends the actual required clock (50/200MHz)
+		 * to the card.
+		 */
+		ddr_clock = clock * 2;
+		sup_clock = sdhci_msm_get_sup_clk_rate(host,
+				ddr_clock);
+	}
+
+	/*
+	 * In general all timing modes are controlled via the UHS mode select
+	 * in the Host Control2 register. The eMMC-specific HS200/HS400 modes
+	 * don't have encodings defined there, hence we use these values.
+	 *
+	 * HS200 - SDR104 (Since they both are equivalent in functionality)
+	 * HS400 - This involves multiple configurations
+	 *		Initially SDR104 - when tuning is required as HS200
+	 *		Then when switching to DDR @ 400MHz (HS400) we use
+	 *		the vendor specific HC_SELECT_IN to control the mode.
+	 *
+	 * In addition to controlling the modes we also need to select the
+	 * correct input clock for DLL depending on the mode.
+	 *
+	 * HS400 - divided clock (free running MCLK/2)
+	 * All other modes - default (free running MCLK)
+	 */
+	if (curr_ios.timing == MMC_TIMING_MMC_HS400) {
+		/* Select the divided clock (free running MCLK/2) */
+		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+					& ~CORE_HC_MCLK_SEL_MASK)
+					| CORE_HC_MCLK_SEL_HS400),
+					host->ioaddr + CORE_VENDOR_SPEC);
+		/*
+		 * Select HS400 mode using the HC_SELECT_IN from VENDOR SPEC
+		 * register
+		 */
+		if (msm_host->tuning_done && !msm_host->calibration_done) {
+			/*
+			 * Write 0x6 to HC_SELECT_IN and 1 to HC_SELECT_IN_EN
+			 * field in VENDOR_SPEC_FUNC
+			 */
+			writel_relaxed((readl_relaxed(host->ioaddr + \
+					CORE_VENDOR_SPEC)
+					| CORE_HC_SELECT_IN_HS400
+					| CORE_HC_SELECT_IN_EN),
+					host->ioaddr + CORE_VENDOR_SPEC);
+		}
+	} else {
+		/* Select the default clock (free running MCLK) */
+		writel_relaxed(((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+					& ~CORE_HC_MCLK_SEL_MASK)
+					| CORE_HC_MCLK_SEL_DFLT),
+					host->ioaddr + CORE_VENDOR_SPEC);
+
+		/*
+		 * Disable HC_SELECT_IN to be able to use the UHS mode select
+		 * configuration from Host Control2 register for all other
+		 * modes.
+		 *
+		 * Write 0 to HC_SELECT_IN and HC_SELECT_IN_EN field
+		 * in VENDOR_SPEC_FUNC
+		 */
+		writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC)
+				& ~CORE_HC_SELECT_IN_EN
+				& ~CORE_HC_SELECT_IN_MASK),
+				host->ioaddr + CORE_VENDOR_SPEC);
+	}
+	mb();
+
+	if (sup_clock != msm_host->clk_rate) {
+		pr_debug("%s: %s: setting clk rate to %u\n",
+				mmc_hostname(host->mmc), __func__, sup_clock);
+		rc = clk_set_rate(msm_host->clk, sup_clock);
+		if (rc) {
+			pr_err("%s: %s: Failed to set rate %u for host-clk : %d\n",
+					mmc_hostname(host->mmc), __func__,
+					sup_clock, rc);
+			goto out;
+		}
+		msm_host->clk_rate = sup_clock;
+		host->clock = clock;
+		/*
+		 * Update the bus vote in case of frequency change due to
+		 * clock scaling.
+		 */
+		sdhci_msm_bus_voting(host, 1);
+	}
+out:
+	sdhci_set_clock(host, clock);
+}
+
+static void sdhci_msm_set_uhs_signaling(struct sdhci_host *host,
+					unsigned int uhs)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	u16 ctrl_2;
+
+	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
+	/* Select Bus Speed Mode for host */
+	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+	if (uhs == MMC_TIMING_MMC_HS400)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+	else if (uhs == MMC_TIMING_MMC_HS200)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+	else if (uhs == MMC_TIMING_UHS_SDR12)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
+	else if (uhs == MMC_TIMING_UHS_SDR25)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
+	else if (uhs == MMC_TIMING_UHS_SDR50)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
+	else if (uhs == MMC_TIMING_UHS_SDR104)
+		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
+	else if (uhs == MMC_TIMING_UHS_DDR50)
+		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
+	/*
+	 * When the clock frequency is less than 100MHz, the feedback clock
+	 * must be provided and the DLL must not be used so that tuning can
+	 * be skipped. To provide the feedback clock, the mode selection can
+	 * be any value less than 3'b011 in bits [2:0] of the HOST CONTROL2
+	 * register.
+	 */
+	if (host->clock <= CORE_FREQ_100MHZ) {
+		if ((uhs == MMC_TIMING_MMC_HS400) ||
+		    (uhs == MMC_TIMING_MMC_HS200) ||
+		    (uhs == MMC_TIMING_UHS_SDR104))
+			ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
+
+		/*
+		 * Make sure DLL is disabled when not required
+		 *
+		 * Write 1 to DLL_RST bit of DLL_CONFIG register
+		 */
+		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+				| CORE_DLL_RST),
+				host->ioaddr + CORE_DLL_CONFIG);
+
+		/* Write 1 to DLL_PDN bit of DLL_CONFIG register */
+		writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG)
+				| CORE_DLL_PDN),
+				host->ioaddr + CORE_DLL_CONFIG);
+		mb();
+
+		/*
+		 * The DLL needs to be restored and CDCLP533 recalibrated
+		 * when the clock frequency is set back to 400MHz.
+		 */
+		msm_host->calibration_done = false;
+	}
+
+	pr_debug("%s: %s-clock:%u uhs mode:%u ctrl_2:0x%x\n",
+		mmc_hostname(host->mmc), __func__, host->clock, uhs, ctrl_2);
+	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
+
+}
+
+#define MAX_TEST_BUS 20
+
+void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
+{
+	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
+	struct sdhci_msm_host *msm_host = pltfm_host->priv;
+	int tbsel, tbsel2;
+	int i, index = 0;
+	u32 test_bus_val = 0;
+	u32 debug_reg[MAX_TEST_BUS] = {0};
+
+	pr_info("----------- VENDOR REGISTER DUMP -----------\n");
+	pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
+		readl_relaxed(msm_host->core_mem + CORE_MCI_DATA_CNT),
+		readl_relaxed(msm_host->core_mem + CORE_MCI_FIFO_CNT),
+		readl_relaxed(msm_host->core_mem + CORE_MCI_STATUS));
+	pr_info("DLL cfg:  0x%08x | DLL sts:  0x%08x | SDCC ver: 0x%08x\n",
+		readl_relaxed(host->ioaddr + CORE_DLL_CONFIG),
+		readl_relaxed(host->ioaddr + CORE_DLL_STATUS),
+		readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION));
+	pr_info("Vndr func: 0x%08x | Vndr adma err : addr0: 0x%08x addr1: 0x%08x\n",
+		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC),
+		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR0),
+		readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC_ADMA_ERR_ADDR1));
+
+	/*
+	 * tbsel indicates [2:0] bits and tbsel2 indicates [7:4] bits
+	 * of CORE_TESTBUS_CONFIG register.
+	 *
+	 * To select test bus 0 to 7 use tbsel and to select any test bus
+	 * above 7 use (tbsel2 | tbsel) to get the test bus number. For eg,
+	 * to select test bus 14, write 0x1E to CORE_TESTBUS_CONFIG register
+	 * i.e., tbsel2[7:4] = 0001, tbsel[2:0] = 110.
+	 */
+	for (tbsel2 = 0; tbsel2 < 3; tbsel2++) {
+		for (tbsel = 0; tbsel < 8; tbsel++) {
+			if (index >= MAX_TEST_BUS)
+				break;
+			test_bus_val = (tbsel2 << CORE_TESTBUS_SEL2_BIT) |
+					tbsel | CORE_TESTBUS_ENA;
+			writel_relaxed(test_bus_val,
+				msm_host->core_mem + CORE_TESTBUS_CONFIG);
+			debug_reg[index++] = readl_relaxed(msm_host->core_mem +
+							CORE_SDCC_DEBUG_REG);
+		}
+	}
+	for (i = 0; i < MAX_TEST_BUS; i = i + 4)
+		pr_info(" Test bus[%d to %d]: 0x%08x 0x%08x 0x%08x 0x%08x\n",
+				i, i + 3, debug_reg[i], debug_reg[i+1],
+				debug_reg[i+2], debug_reg[i+3]);
+	/* Disable test bus */
+	writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_TESTBUS_CONFIG)
+			& ~CORE_TESTBUS_ENA,
+			msm_host->core_mem + CORE_TESTBUS_CONFIG);
 }
 
 static struct sdhci_ops sdhci_msm_ops = {
+	.set_uhs_signaling = sdhci_msm_set_uhs_signaling,
 	.check_power_status = sdhci_msm_check_power_status,
 	.platform_execute_tuning = sdhci_msm_execute_tuning,
 	.toggle_cdr = sdhci_msm_toggle_cdr,
 	.get_max_segments = sdhci_msm_max_segs,
 	.set_clock = sdhci_msm_set_clock,
-	.platform_bus_voting = sdhci_msm_bus_voting,
+	.get_min_clock = sdhci_msm_get_min_clock,
+	.get_max_clock = sdhci_msm_get_max_clock,
+	.dump_vendor_regs = sdhci_msm_dump_vendor_regs,
+	.config_auto_tuning_cmd = sdhci_msm_config_auto_tuning_cmd,
+	.enable_controller_clock = sdhci_msm_enable_controller_clock,
 };
 
+static void sdhci_set_default_hw_caps(struct sdhci_msm_host *msm_host,
+		struct sdhci_host *host)
+{
+	u32 version, caps;
+	u16 minor;
+	u8 major;
+
+	version = readl_relaxed(msm_host->core_mem + CORE_MCI_VERSION);
+	major = (version & CORE_VERSION_MAJOR_MASK) >>
+			CORE_VERSION_MAJOR_SHIFT;
+	minor = version & CORE_VERSION_TARGET_MASK;
+
+	/*
+	 * Starting with the SDCC 5 controller (core major version = 1),
+	 * the controller no longer advertises the 3.0V and 8-bit features
+	 * except on some targets.
+	 */
+	if (major >= 1 && minor != 0x11 && minor != 0x12) {
+		caps = CORE_3_0V_SUPPORT;
+		if (msm_host->pdata->mmc_bus_width == MMC_CAP_8_BIT_DATA)
+			caps |= CORE_8_BIT_SUPPORT;
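+		/*
+		 * Re-advertise the missing features through the
+		 * vendor-specific capabilities override register.
+		 */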
+		writel_relaxed(
+			(readl_relaxed(host->ioaddr + SDHCI_CAPABILITIES) |
+			caps), host->ioaddr + CORE_VENDOR_SPEC_CAPABILITIES0);
+	}
+}
+
 static int sdhci_msm_probe(struct platform_device *pdev)
 {
 	struct sdhci_host *host;
@@ -1615,7 +2518,8 @@
 	struct sdhci_msm_host *msm_host;
 	struct resource *core_memres = NULL;
 	int ret = 0, pwr_irq = 0, dead = 0;
-	u32 host_version;
+	u16 host_version;
+	u32 pwr, irq_status, irq_ctl;
 
 	pr_debug("%s: Enter %s\n", dev_name(&pdev->dev), __func__);
 	msm_host = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_msm_host),
@@ -1624,7 +2528,6 @@
 		ret = -ENOMEM;
 		goto out;
 	}
-	init_waitqueue_head(&msm_host->pwr_irq_wait);
 
 	msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops;
 	host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0);
@@ -1640,6 +2543,19 @@
 
 	/* Extract platform data */
 	if (pdev->dev.of_node) {
+		ret = of_alias_get_id(pdev->dev.of_node, "sdhc");
+		if (ret < 0) {
+			dev_err(&pdev->dev, "Failed to get slot index %d\n",
+				ret);
+			goto pltfm_free;
+		}
+		if (disable_slots & (1 << (ret - 1))) {
+			dev_info(&pdev->dev, "%s: Slot %d disabled\n", __func__,
+				ret);
+			ret = -ENODEV;
+			goto pltfm_free;
+		}
+
 		msm_host->pdata = sdhci_msm_populate_pdata(&pdev->dev);
 		if (!msm_host->pdata) {
 			dev_err(&pdev->dev, "DT parsing error\n");
@@ -1671,6 +2587,7 @@
 		if (ret)
 			goto bus_clk_disable;
 	}
+	atomic_set(&msm_host->controller_clock, 1);
 
 	/* Setup SDC MMC clock */
 	msm_host->clk = devm_clk_get(&pdev->dev, "core_clk");
@@ -1679,16 +2596,51 @@
 		goto pclk_disable;
 	}
 
+	/* Set to the minimum supported clock frequency */
+	ret = clk_set_rate(msm_host->clk, sdhci_msm_get_min_clock(host));
+	if (ret) {
+		dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
+		goto pclk_disable;
+	}
 	ret = clk_prepare_enable(msm_host->clk);
 	if (ret)
 		goto pclk_disable;
 
+	msm_host->clk_rate = sdhci_msm_get_min_clock(host);
 	atomic_set(&msm_host->clks_on, 1);
+
+	/* Setup CDC calibration fixed feedback clock */
+	msm_host->ff_clk = devm_clk_get(&pdev->dev, "cal_clk");
+	if (!IS_ERR(msm_host->ff_clk)) {
+		ret = clk_prepare_enable(msm_host->ff_clk);
+		if (ret)
+			goto clk_disable;
+	}
+
+	/* Setup CDC calibration sleep clock */
+	msm_host->sleep_clk = devm_clk_get(&pdev->dev, "sleep_clk");
+	if (!IS_ERR(msm_host->sleep_clk)) {
+		ret = clk_prepare_enable(msm_host->sleep_clk);
+		if (ret)
+			goto ff_clk_disable;
+	}
+
+	msm_host->saved_tuning_phase = INVALID_TUNING_PHASE;
+
+	ret = sdhci_msm_bus_register(msm_host, pdev);
+	if (ret)
+		goto sleep_clk_disable;
+
+	if (msm_host->msm_bus_vote.client_handle)
+		INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
+				  sdhci_msm_bus_work);
+	sdhci_msm_bus_voting(host, 1);
+
 	/* Setup regulators */
 	ret = sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, true);
 	if (ret) {
 		dev_err(&pdev->dev, "Regulator setup failed (%d)\n", ret);
-		goto clk_disable;
+		goto bus_unregister;
 	}
 
 	/* Reset the core and Enable SDHC mode */
@@ -1703,11 +2655,54 @@
 		goto vreg_deinit;
 	}
 
+	/* Unset HC_MODE_EN bit in HC_MODE register */
+	writel_relaxed(0, (msm_host->core_mem + CORE_HC_MODE));
+
 	/* Set SW_RST bit in POWER register (Offset 0x0) */
-	writel_relaxed(CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+	writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) |
+			CORE_SW_RST, msm_host->core_mem + CORE_POWER);
+	/*
+	 * SW reset can take up to 10 HCLK + 15 MCLK cycles.
+	 * Calculated at the minimum clock rates (hclk = 27MHz,
+	 * mclk = 400KHz) this comes to ~40us. Poll for at most
+	 * 1ms for reset completion.
+	 */
+	ret = readl_poll_timeout(msm_host->core_mem + CORE_POWER,
+			pwr, !(pwr & CORE_SW_RST), 100, 10);
+
+	if (ret) {
+		dev_err(&pdev->dev, "reset failed (%d)\n", ret);
+		goto vreg_deinit;
+	}
 	/* Set HC_MODE_EN bit in HC_MODE register */
 	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE));
 
+	/* Set FF_CLK_SW_RST_DIS bit in HC_MODE register */
+	writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_HC_MODE) |
+			FF_CLK_SW_RST_DIS, msm_host->core_mem + CORE_HC_MODE);
+
+	sdhci_set_default_hw_caps(msm_host, host);
+	/*
+	 * CORE_SW_RST above may trigger power irq if previous status of PWRCTL
+	 * was either BUS_ON or IO_HIGH_V. So before we enable the power irq
+	 * interrupt in GIC (by registering the interrupt handler), we need to
+	 * ensure that any pending power irq interrupt status is acknowledged;
+	 * otherwise the power irq interrupt handler would fire prematurely.
+	 */
+	irq_status = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_STATUS);
+	writel_relaxed(irq_status, (msm_host->core_mem + CORE_PWRCTL_CLEAR));
+	irq_ctl = readl_relaxed(msm_host->core_mem + CORE_PWRCTL_CTL);
+	if (irq_status & (CORE_PWRCTL_BUS_ON | CORE_PWRCTL_BUS_OFF))
+		irq_ctl |= CORE_PWRCTL_BUS_SUCCESS;
+	if (irq_status & (CORE_PWRCTL_IO_HIGH | CORE_PWRCTL_IO_LOW))
+		irq_ctl |= CORE_PWRCTL_IO_SUCCESS;
+	writel_relaxed(irq_ctl, (msm_host->core_mem + CORE_PWRCTL_CTL));
+	/*
+	 * Ensure that the above writes are propagated before the interrupt
+	 * is enabled in the GIC.
+	 */
+	mb();
+
 	/*
 	 * Following are the deviations from SDHC spec v3.0 -
 	 * 1. Card detection is handled using separate GPIO.
@@ -1715,8 +2710,16 @@
 	 */
 	host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION;
 	host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE;
+	host->quirks |= SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN;
+	host->quirks2 |= SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK;
+	host->quirks2 |= SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD;
+	host->quirks2 |= SDHCI_QUIRK2_BROKEN_PRESET_VALUE;
+	host->quirks2 |= SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT;
 
-	host_version = readl_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
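+	/*
+	 * When the base clock is always used, data timeouts are computed
+	 * from SDCLK/4 (see sdhci_calc_timeout()), as the timeout logic on
+	 * these controllers is assumed to observe the divided rate.
+	 */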
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+		host->quirks2 |= SDHCI_QUIRK2_DIVIDE_TOUT_BY_4;
+
+	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION));
 	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n",
 		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >>
 		  SDHCI_VENDOR_VER_SHIFT));
@@ -1734,6 +2737,8 @@
 		host->quirks2 |= SDHCI_QUIRK2_RDWR_TX_ACTIVE_EOT;
 	}
 
+	host->quirks2 |= SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR;
+
 	/* Setup PWRCTL irq */
 	pwr_irq = platform_get_irq_byname(pdev, "pwr_irq");
 	if (pwr_irq < 0) {
@@ -1761,37 +2766,45 @@
 	/* Set host capabilities */
 	msm_host->mmc->caps |= msm_host->pdata->mmc_bus_width;
 	msm_host->mmc->caps |= msm_host->pdata->caps;
-	msm_host->mmc->caps |= MMC_CAP_HW_RESET;
 	msm_host->mmc->caps2 |= msm_host->pdata->caps2;
 	msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR;
 	msm_host->mmc->caps2 |= MMC_CAP2_PACKED_WR_CONTROL;
+	msm_host->mmc->caps2 |= MMC_CAP2_CLK_SCALE;
+	msm_host->mmc->caps2 |= MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE;
+	msm_host->mmc->pm_caps |= MMC_PM_KEEP_POWER;
 
 	if (msm_host->pdata->nonremovable)
 		msm_host->mmc->caps |= MMC_CAP_NONREMOVABLE;
 
 	host->cpu_dma_latency_us = msm_host->pdata->cpu_dma_latency_us;
 
-	ret = sdhci_msm_bus_register(msm_host, pdev);
-	if (ret)
-		goto vreg_deinit;
+	init_completion(&msm_host->pwr_irq_completion);
 
-	if (msm_host->msm_bus_vote.client_handle)
-		INIT_DELAYED_WORK(&msm_host->msm_bus_vote.vote_work,
-				  sdhci_msm_bus_work);
+	if (gpio_is_valid(msm_host->pdata->status_gpio)) {
+		ret = mmc_gpio_request_cd(msm_host->mmc,
+				msm_host->pdata->status_gpio, 0);
+		if (ret) {
+			dev_err(&pdev->dev, "%s: Failed to request card detection IRQ %d\n",
+					__func__, ret);
+			goto vreg_deinit;
+		}
+	}
+
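+	/*
+	 * Prefer 64-bit DMA when both the controller and the platform
+	 * support it; otherwise fall back to a 32-bit mask.
+	 */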
+	if ((sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) &&
+		(dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(64)))) {
+		host->dma_mask = DMA_BIT_MASK(64);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+	} else if (dma_supported(mmc_dev(host->mmc), DMA_BIT_MASK(32))) {
+		host->dma_mask = DMA_BIT_MASK(32);
+		mmc_dev(host->mmc)->dma_mask = &host->dma_mask;
+	} else {
+		dev_err(&pdev->dev, "%s: Failed to set dma mask\n", __func__);
+	}
 
 	ret = sdhci_add_host(host);
 	if (ret) {
 		dev_err(&pdev->dev, "Add host failed (%d)\n", ret);
-		goto bus_unregister;
-	}
-
-	 /* Set core clk rate, optionally override from dts */
-	if (msm_host->pdata->max_clk)
-		host->max_clk = msm_host->pdata->max_clk;
-	ret = clk_set_rate(msm_host->clk, host->max_clk);
-	if (ret) {
-		dev_err(&pdev->dev, "MClk rate set failed (%d)\n", ret);
-		goto remove_host;
+		goto vreg_deinit;
 	}
 
 	msm_host->msm_bus_vote.max_bus_bw.show = show_sdhci_max_bus_bw;
@@ -1804,16 +2817,49 @@
 	if (ret)
 		goto remove_host;
 
+	if (!gpio_is_valid(msm_host->pdata->status_gpio)) {
+		msm_host->polling.show = show_polling;
+		msm_host->polling.store = store_polling;
+		sysfs_attr_init(&msm_host->polling.attr);
+		msm_host->polling.attr.name = "polling";
+		msm_host->polling.attr.mode = S_IRUGO | S_IWUSR;
+		ret = device_create_file(&pdev->dev, &msm_host->polling);
+		if (ret)
+			goto remove_max_bus_bw_file;
+	}
+
+	msm_host->auto_cmd21_attr.show = show_auto_cmd21;
+	msm_host->auto_cmd21_attr.store = store_auto_cmd21;
+	sysfs_attr_init(&msm_host->auto_cmd21_attr.attr);
+	msm_host->auto_cmd21_attr.attr.name = "enable_auto_cmd21";
+	msm_host->auto_cmd21_attr.attr.mode = S_IRUGO | S_IWUSR;
+	ret = device_create_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+	if (ret) {
+		pr_err("%s: %s: failed creating auto-cmd21 attr: %d\n",
+		       mmc_hostname(host->mmc), __func__, ret);
+		device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
+	}
+
 	/* Successful initialization */
 	goto out;
 
+remove_max_bus_bw_file:
+	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
 remove_host:
 	dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff);
 	sdhci_remove_host(host, dead);
-bus_unregister:
-	sdhci_msm_bus_unregister(msm_host);
 vreg_deinit:
 	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
+bus_unregister:
+	if (msm_host->msm_bus_vote.client_handle)
+		sdhci_msm_bus_cancel_work_and_set_vote(host, 0);
+	sdhci_msm_bus_unregister(msm_host);
+sleep_clk_disable:
+	if (!IS_ERR(msm_host->sleep_clk))
+		clk_disable_unprepare(msm_host->sleep_clk);
+ff_clk_disable:
+	if (!IS_ERR(msm_host->ff_clk))
+		clk_disable_unprepare(msm_host->ff_clk);
 clk_disable:
 	if (!IS_ERR(msm_host->clk))
 		clk_disable_unprepare(msm_host->clk);
@@ -1840,9 +2886,12 @@
 			0xffffffff);
 
 	pr_debug("%s: %s\n", dev_name(&pdev->dev), __func__);
+	if (!gpio_is_valid(msm_host->pdata->status_gpio))
+		device_remove_file(&pdev->dev, &msm_host->polling);
 	device_remove_file(&pdev->dev, &msm_host->msm_bus_vote.max_bus_bw);
 	sdhci_remove_host(host, dead);
 	sdhci_pltfm_free(pdev);
+
 	sdhci_msm_vreg_init(&pdev->dev, msm_host->pdata, false);
 
 	if (pdata->pin_data)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bb8e95b..60be7c5 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -30,6 +30,7 @@
 #include <linux/mmc/card.h>
 #include <linux/mmc/sdio.h>
 #include <linux/mmc/slot-gpio.h>
 
 #include "sdhci.h"
 
@@ -45,62 +46,119 @@
 
 static void sdhci_finish_data(struct sdhci_host *);
 
+static bool sdhci_check_state(struct sdhci_host *);
+
 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
 
+static void sdhci_dump_state(struct sdhci_host *host)
+{
+	struct mmc_host *mmc = host->mmc;
+
+#ifdef CONFIG_MMC_CLKGATE
+	pr_info("%s: clk: %d clk-gated: %d claimer: %s pwr: %d\n",
+		mmc_hostname(mmc), host->clock, mmc->clk_gated,
+		mmc->claimer->comm, host->pwr);
+#else
+	pr_info("%s: clk: %d claimer: %s pwr: %d\n",
+		mmc_hostname(mmc), host->clock,
+		mmc->claimer->comm, host->pwr);
+#endif
+	pr_info("%s: rpmstatus[pltfm](runtime-suspend:usage_count:disable_depth)(%d:%d:%d)\n",
+		mmc_hostname(mmc), mmc->parent->power.runtime_status,
+		atomic_read(&mmc->parent->power.usage_count),
+		mmc->parent->power.disable_depth);
+}
+
 static void sdhci_dumpregs(struct sdhci_host *host)
 {
-	pr_err(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
-	       mmc_hostname(host->mmc));
+	pr_info(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
+		mmc_hostname(host->mmc));
 
-	pr_err(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
+	pr_info(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",
 	       sdhci_readl(host, SDHCI_DMA_ADDRESS),
 	       sdhci_readw(host, SDHCI_HOST_VERSION));
-	pr_err(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
+	pr_info(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",
 	       sdhci_readw(host, SDHCI_BLOCK_SIZE),
 	       sdhci_readw(host, SDHCI_BLOCK_COUNT));
-	pr_err(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",
 	       sdhci_readl(host, SDHCI_ARGUMENT),
 	       sdhci_readw(host, SDHCI_TRANSFER_MODE));
-	pr_err(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",
 	       sdhci_readl(host, SDHCI_PRESENT_STATE),
 	       sdhci_readb(host, SDHCI_HOST_CONTROL));
-	pr_err(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
+	pr_info(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",
 	       sdhci_readb(host, SDHCI_POWER_CONTROL),
 	       sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
-	pr_err(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
+	pr_info(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",
 	       sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
 	       sdhci_readw(host, SDHCI_CLOCK_CONTROL));
-	pr_err(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",
 	       sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
 	       sdhci_readl(host, SDHCI_INT_STATUS));
-	pr_err(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",
 	       sdhci_readl(host, SDHCI_INT_ENABLE),
 	       sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
-	pr_err(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
-	       sdhci_readw(host, SDHCI_ACMD12_ERR),
+	pr_info(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",
+		host->auto_cmd_err_sts,
 	       sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
-	pr_err(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
+	pr_info(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",
 	       sdhci_readl(host, SDHCI_CAPABILITIES),
 	       sdhci_readl(host, SDHCI_CAPABILITIES_1));
-	pr_err(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n",
 	       sdhci_readw(host, SDHCI_COMMAND),
 	       sdhci_readl(host, SDHCI_MAX_CURRENT));
-	pr_err(DRIVER_NAME ": Host ctl2: 0x%08x\n",
+	pr_info(DRIVER_NAME ": Resp 1:   0x%08x | Resp 0:   0x%08x\n",
+		sdhci_readl(host, SDHCI_RESPONSE + 0x4),
+		sdhci_readl(host, SDHCI_RESPONSE));
+	pr_info(DRIVER_NAME ": Resp 3:   0x%08x | Resp 2:   0x%08x\n",
+		sdhci_readl(host, SDHCI_RESPONSE + 0xC),
+		sdhci_readl(host, SDHCI_RESPONSE + 0x8));
+	pr_info(DRIVER_NAME ": Host ctl2: 0x%08x\n",
 	       sdhci_readw(host, SDHCI_HOST_CONTROL2));
 
 	if (host->flags & SDHCI_USE_ADMA) {
 		if (host->flags & SDHCI_USE_64_BIT_DMA)
-			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
+			pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
 			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
 			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI),
 			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
 		else
-			pr_err(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
+			pr_info(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
 			       readl(host->ioaddr + SDHCI_ADMA_ERROR),
 			       readl(host->ioaddr + SDHCI_ADMA_ADDRESS));
 	}
 
-	pr_err(DRIVER_NAME ": ===========================================\n");
+	if (host->ops->dump_vendor_regs)
+		host->ops->dump_vendor_regs(host);
+	sdhci_dump_state(host);
+	pr_info(DRIVER_NAME ": ===========================================\n");
+}
+
+#define MAX_PM_QOS_TIMEOUT_VALUE	100000 /* 100 ms */
+static ssize_t
+show_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%d us\n", host->pm_qos_timeout_us);
+}
+
+static ssize_t
+store_sdhci_pm_qos_tout(struct device *dev, struct device_attribute *attr,
+		const char *buf, size_t count)
+{
+	struct sdhci_host *host = dev_get_drvdata(dev);
+	uint32_t value;
+	unsigned long flags;
+
+	if (!kstrtou32(buf, 0, &value)) {
+		spin_lock_irqsave(&host->lock, flags);
+		if (value <= MAX_PM_QOS_TIMEOUT_VALUE)
+			host->pm_qos_timeout_us = value;
+		spin_unlock_irqrestore(&host->lock, flags);
+	}
+	return count;
 }
 
 /*****************************************************************************\
@@ -178,6 +236,10 @@
 	/* Wait max 100 ms */
 	timeout = 100;
 
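+	/*
+	 * A full reset is expected to trigger a bus-off power request on
+	 * hosts with a power-control FSM; wait for it to be serviced
+	 * before polling the reset bit.
+	 */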
+	if (host->ops->check_power_status && host->pwr &&
+	    (mask & SDHCI_RESET_ALL))
+		host->ops->check_power_status(host, REQ_BUS_OFF);
+
 	/* hw clears the bit when it's done */
 	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {
 		if (timeout == 0) {
@@ -227,7 +289,7 @@
 		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
 		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
 		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
-		    SDHCI_INT_RESPONSE;
+		    SDHCI_INT_RESPONSE | SDHCI_INT_AUTO_CMD_ERR;
 
 	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
 	    host->tuning_mode == SDHCI_TUNING_MODE_3)
@@ -276,7 +338,7 @@
 
 	spin_lock_irqsave(&host->lock, flags);
 
-	if (host->runtime_suspended)
+	if (host->runtime_suspended || sdhci_check_state(host))
 		goto out;
 
 	if (brightness == LED_OFF)
@@ -660,6 +722,7 @@
 	u8 count;
 	struct mmc_data *data = cmd->data;
 	unsigned target_timeout, current_timeout;
+	u32 curr_clk = 0; /* In KHz */
 
 	/*
 	 * If the host controller provides us with an incorrect timeout
@@ -705,7 +768,14 @@
 	 *     (1) / (2) > 2^6
 	 */
 	count = 0;
-	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK) {
+		curr_clk = host->clock / 1000;
+		if (host->quirks2 & SDHCI_QUIRK2_DIVIDE_TOUT_BY_4)
+			curr_clk /= 4;
+		current_timeout = (1 << 13) * 1000 / curr_clk;
+	} else {
+		current_timeout = (1 << 13) * 1000 / host->timeout_clk;
+	}
 	while (current_timeout < target_timeout) {
 		count++;
 		current_timeout <<= 1;
@@ -713,10 +783,12 @@
 			break;
 	}
 
-	if (count >= 0xF) {
-		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
-		    mmc_hostname(host->mmc), count, cmd->opcode);
-		count = 0xE;
+	if (!(host->quirks2 & SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT)) {
+		if (count >= 0xF) {
+			DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
+			    mmc_hostname(host->mmc), count, cmd->opcode);
+			count = 0xE;
+		}
 	}
 
 	return count;
@@ -773,7 +845,7 @@
 	WARN_ON(host->data);
 
 	/* Sanity checks */
-	BUG_ON(data->blksz * data->blocks > 524288);
+	BUG_ON(data->blksz * data->blocks > host->mmc->max_req_size);
 	BUG_ON(data->blksz > host->mmc->max_blk_size);
 	BUG_ON(data->blocks > 65535);
 
@@ -941,8 +1013,13 @@
 		}
 	}
 
-	if (data->flags & MMC_DATA_READ)
+	if (data->flags & MMC_DATA_READ) {
 		mode |= SDHCI_TRNS_READ;
+		if (host->ops->toggle_cdr)
+			host->ops->toggle_cdr(host, true);
+	}
+	if (host->ops->toggle_cdr && (data->flags & MMC_DATA_WRITE))
+		host->ops->toggle_cdr(host, false);
 	if (host->flags & SDHCI_REQ_USE_DMA)
 		mode |= SDHCI_TRNS_DMA;
 
@@ -1162,6 +1239,8 @@
 	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
 		flags |= SDHCI_CMD_DATA;
 
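+	/*
+	 * Record the start of the data phase; data error reports print the
+	 * elapsed time from this timestamp.
+	 */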
+	if (cmd->data)
+		host->data_start_time = ktime_get();
 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
 }
 EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1345,6 +1424,10 @@
 clock_set:
 	if (real_div)
 		*actual_clock = (host->max_clk * clk_mul) / real_div;
+
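+	/*
+	 * The platform driver manages the card clock rate itself (via the
+	 * clk framework), so program a divider of 0 and pass the base
+	 * clock through unchanged.
+	 */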
+	if (host->quirks2 & SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK)
+		div = 0;
+
 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
 		<< SDHCI_DIVIDER_HI_SHIFT;
@@ -1439,6 +1522,8 @@
 
 	if (pwr == 0) {
 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_BUS_OFF);
 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 			sdhci_runtime_pm_bus_off(host);
 	} else {
@@ -1446,20 +1531,27 @@
 		 * Spec says that we should clear the power reg before setting
 		 * a new value. Some controllers don't seem to like this though.
 		 */
-		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
+		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) {
 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
-
+			if (host->ops->check_power_status)
+				host->ops->check_power_status(host, REQ_BUS_OFF);
+		}
 		/*
 		 * At least the Marvell CaFe chip gets confused if we set the
 		 * voltage and set turn on power at the same time, so set the
 		 * voltage first.
 		 */
-		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
+		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) {
 			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+			if (host->ops->check_power_status)
+				host->ops->check_power_status(host, REQ_BUS_ON);
+		}
 
 		pwr |= SDHCI_POWER_ON;
 
 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_BUS_ON);
 
 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
 			sdhci_runtime_pm_bus_on(host);
@@ -1507,15 +1599,91 @@
 {
 	struct sdhci_host *host = mmc_priv(mmc);
 
-	if (host->cpu_dma_latency_us)
-		pm_qos_update_request(&host->pm_qos_req_dma,
+	if (host->cpu_dma_latency_us) {
+		/*
+		 * In performance mode, release the QoS vote only after a
+		 * timeout so that back-to-back requests don't pay the
+		 * latency of waking the CPU from low power modes, which
+		 * would happen if the CPU entered a low power mode as soon
+		 * as the QoS vote was released.
+		 */
+		if (host->power_policy == SDHCI_PERFORMANCE_MODE)
+			pm_qos_update_request_timeout(&host->pm_qos_req_dma,
+					host->cpu_dma_latency_us,
+					host->pm_qos_timeout_us);
+		else
+			pm_qos_update_request(&host->pm_qos_req_dma,
 					PM_QOS_DEFAULT_VALUE);
+	}
+
 	if (host->ops->platform_bus_voting)
 		host->ops->platform_bus_voting(host, 0);
 
 	return 0;
 }
 
+static inline void sdhci_update_power_policy(struct sdhci_host *host,
+		enum sdhci_power_policy policy)
+{
+	host->power_policy = policy;
+}
+
+static int sdhci_notify_load(struct mmc_host *mmc, enum mmc_load state)
+{
+	int err = 0;
+	struct sdhci_host *host = mmc_priv(mmc);
+
+	switch (state) {
+	case MMC_LOAD_HIGH:
+		sdhci_update_power_policy(host, SDHCI_PERFORMANCE_MODE);
+		break;
+	case MMC_LOAD_LOW:
+		sdhci_update_power_policy(host, SDHCI_POWER_SAVE_MODE);
+		break;
+	default:
+		err = -EINVAL;
+		break;
+	}
+
+	return err;
+}
+
+static bool sdhci_check_state(struct sdhci_host *host)
+{
+	return !host->clock || !host->pwr;
+}
+
+static bool sdhci_check_auto_tuning(struct sdhci_host *host,
+				  struct mmc_command *cmd)
+{
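+	/*
+	 * Auto CMD19/CMD21 tuning is enabled only for read transfers
+	 * (including SDIO CMD53) running at 100MHz or above in HS200 or
+	 * SDR104 mode.
+	 */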
+	if (((cmd->opcode != MMC_READ_SINGLE_BLOCK) &&
+	     (cmd->opcode != MMC_READ_MULTIPLE_BLOCK) &&
+	     (cmd->opcode != SD_IO_RW_EXTENDED)) || (host->clock < 100000000))
+		return false;
+	else if (host->mmc->ios.timing == MMC_TIMING_MMC_HS200 ||
+		 host->mmc->ios.timing == MMC_TIMING_UHS_SDR104)
+		return true;
+	else
+		return false;
+}
+
+static int sdhci_get_tuning_cmd(struct sdhci_host *host)
+{
+	if (!host->mmc || !host->mmc->card)
+		return 0;
+	/*
+	 * If we are here, all conditions have already been true
+	 * and the card can either be an eMMC or SD/SDIO
+	 */
+	if (mmc_card_mmc(host->mmc->card))
+		return MMC_SEND_TUNING_BLOCK_HS200;
+	else
+		return MMC_SEND_TUNING_BLOCK;
+}
+
 static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
 {
 	struct sdhci_host *host;
@@ -1524,8 +1692,32 @@
 
 	host = mmc_priv(mmc);
 
-	/* Firstly check card presence */
+	if (sdhci_check_state(host)) {
+		sdhci_dump_state(host);
+		WARN(1, "sdhci in bad state");
+		mrq->cmd->error = -EIO;
+		if (mrq->data)
+			mrq->data->error = -EIO;
+		tasklet_schedule(&host->finish_tasklet);
+		return;
+	}
+
+	/*
+	 * Firstly check card presence from cd-gpio.  The return could
+	 * be one of the following possibilities:
+	 *     negative: cd-gpio is not available
+	 *     zero: cd-gpio is used, and card is removed
+	 *     one: cd-gpio is used, and card is present
+	 */
 	present = mmc->ops->get_cd(mmc);
+	if (present < 0) {
+		/* If polling, assume that the card is always present. */
+		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
+			present = 1;
+		else
+			present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
+					SDHCI_CARD_PRESENT;
+	}
 
 	spin_lock_irqsave(&host->lock, flags);
 
@@ -1546,6 +1738,15 @@
 		mrq->cmd->error = -ENOMEDIUM;
 		sdhci_finish_mrq(host, mrq);
 	} else {
+		if (host->ops->config_auto_tuning_cmd) {
+			if (sdhci_check_auto_tuning(host, mrq->cmd))
+				host->ops->config_auto_tuning_cmd(host, true,
+					sdhci_get_tuning_cmd(host));
+			else
+				host->ops->config_auto_tuning_cmd(host, false,
+					sdhci_get_tuning_cmd(host));
+		}
+
 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
 			sdhci_send_command(host, mrq->sbc);
 		else
@@ -1607,11 +1808,9 @@
 	struct sdhci_host *host = mmc_priv(mmc);
 	unsigned long flags;
 	u8 ctrl;
-
-	spin_lock_irqsave(&host->lock, flags);
+	int ret;
 
 	if (host->flags & SDHCI_DEVICE_DEAD) {
-		spin_unlock_irqrestore(&host->lock, flags);
 		if (!IS_ERR(mmc->supply.vmmc) &&
 		    ios->power_mode == MMC_POWER_OFF)
 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
@@ -1623,6 +1822,25 @@
 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
 		sdhci_enable_preset_value(host, false);
 
+	/*
+	 * The controller clocks may be off during power-up and we may end up
+	 * enabling the card clock before giving power to the card. Hence,
+	 * during MMC_POWER_UP enable the controller clock and turn on the
+	 * regulators. mmc_power_up() provides the necessary delay before
+	 * turning on the clocks to the card.
+	 */
+	if (ios->power_mode & MMC_POWER_UP) {
+		if (host->ops->enable_controller_clock) {
+			ret = host->ops->enable_controller_clock(host);
+			if (ret) {
+				pr_err("%s: enabling controller clock: failed: %d\n",
+				       mmc_hostname(host->mmc), ret);
+			} else {
+				sdhci_set_power(host, ios->power_mode, ios->vdd);
+			}
+		}
+	}
+
 	spin_lock_irqsave(&host->lock, flags);
 	if (!ios->clock || ios->clock != host->clock) {
 		spin_unlock_irqrestore(&host->lock, flags);
@@ -1647,9 +1865,13 @@
 	if (host->ops->set_power)
 		host->ops->set_power(host, ios->power_mode, ios->vdd);
 	else
-		if (ios->power_mode & (MMC_POWER_UP | MMC_POWER_ON))
+		if (!host->ops->enable_controller_clock && (ios->power_mode &
+							(MMC_POWER_UP |
+							 MMC_POWER_ON)))
 			sdhci_set_power(host, ios->power_mode, ios->vdd);
 
+	spin_lock_irqsave(&host->lock, flags);
+
 	if (host->ops->platform_send_init_74_clocks)
 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
 
@@ -1716,7 +1938,8 @@
 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
 			/* Re-enable SD Clock */
-			host->ops->set_clock(host, host->clock);
+			if (ios->clock)
+				host->ops->set_clock(host, host->clock);
 		}
 
 		/* Reset SD Clock Enable */
@@ -1743,7 +1966,8 @@
 		}
 
 		/* Re-enable SD Clock */
-		host->ops->set_clock(host, host->clock);
+		if (ios->clock)
+			host->ops->set_clock(host, host->clock);
 	} else
 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
 
@@ -1907,6 +2131,8 @@
 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
 		ctrl &= ~SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_IO_HIGH);
 
 		if (!IS_ERR(mmc->supply.vqmmc)) {
 			ret = mmc_regulator_set_vqmmc(mmc, ios);
@@ -1946,6 +2172,8 @@
 		 */
 		ctrl |= SDHCI_CTRL_VDD_180;
 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
+		if (host->ops->check_power_status)
+			host->ops->check_power_status(host, REQ_IO_LOW);
 
 		/* Some controller need to do more when switching */
 		if (host->ops->voltage_switch)
@@ -2227,6 +2455,9 @@
 	if (host->version < SDHCI_SPEC_300)
 		return;
 
+	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_PRESET_VALUE)
+		return;
+
 	/*
 	 * We only enable or disable Preset Value if they are not already
 	 * enabled or disabled respectively. Otherwise, we bail out.
@@ -2340,6 +2571,7 @@
 	.card_busy	= sdhci_card_busy,
 	.enable		= sdhci_enable,
 	.disable	= sdhci_disable,
+	.notify_load	= sdhci_notify_load,
 };
 
 /*****************************************************************************\
@@ -2421,6 +2653,7 @@
 		sdhci_led_deactivate(host);
 
 	host->mrqs_done[i] = NULL;
+	host->auto_cmd_err_sts = 0;
 
 	mmiowb();
 	spin_unlock_irqrestore(&host->lock, flags);
@@ -2476,6 +2709,11 @@
 		sdhci_dumpregs(host);
 
 		if (host->data) {
+			pr_info("%s: bytes to transfer: %d transferred: %d\n",
+				mmc_hostname(host->mmc),
+				(host->data->blksz * host->data->blocks),
+				(sdhci_readw(host, SDHCI_BLOCK_SIZE) & 0xFFF) *
+				sdhci_readw(host, SDHCI_BLOCK_COUNT));
 			host->data->error = -ETIMEDOUT;
 			sdhci_finish_data(host);
 		} else if (host->data_cmd) {
@@ -2499,6 +2737,7 @@
 
 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)
 {
+	u16 auto_cmd_status;
+
 	if (!host->cmd) {
 		/*
 		 * SDHCI recovers from errors by resetting the cmd and data
@@ -2514,12 +2753,27 @@
 	}
 
 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
-		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
+		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX |
+		       SDHCI_INT_AUTO_CMD_ERR)) {
 		if (intmask & SDHCI_INT_TIMEOUT)
 			host->cmd->error = -ETIMEDOUT;
 		else
 			host->cmd->error = -EILSEQ;
 
+		if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
+			auto_cmd_status = host->auto_cmd_err_sts;
+			pr_err("%s: %s: AUTO CMD err sts 0x%08x\n",
+				mmc_hostname(host->mmc), __func__,
+				auto_cmd_status);
+			if (auto_cmd_status & (SDHCI_AUTO_CMD12_NOT_EXEC |
+					       SDHCI_AUTO_CMD_INDEX_ERR |
+					       SDHCI_AUTO_CMD_ENDBIT_ERR))
+				host->cmd->error = -EIO;
+			else if (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT_ERR)
+				host->cmd->error = -ETIMEDOUT;
+			else if (auto_cmd_status & SDHCI_AUTO_CMD_CRC_ERR)
+				host->cmd->error = -EILSEQ;
+		}
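[Editor's note: the decode above maps the auto CMD status bits (the SDHCI_AUTO_CMD_* values added in sdhci.h later in this patch) onto errno values, with hard faults taking precedence. A standalone sketch of the same precedence:]

#include <errno.h>

/* Values match the SDHCI_AUTO_CMD_* definitions added in sdhci.h below. */
#define AUTO_CMD12_NOT_EXEC	0x0001
#define AUTO_CMD_TIMEOUT_ERR	0x0002
#define AUTO_CMD_CRC_ERR	0x0004
#define AUTO_CMD_ENDBIT_ERR	0x0008
#define AUTO_CMD_INDEX_ERR	0x0010

static int auto_cmd_status_to_errno(unsigned short sts)
{
	/* Hard faults first, then timeout, then CRC, as in sdhci_cmd_irq(). */
	if (sts & (AUTO_CMD12_NOT_EXEC | AUTO_CMD_INDEX_ERR |
		   AUTO_CMD_ENDBIT_ERR))
		return -EIO;
	if (sts & AUTO_CMD_TIMEOUT_ERR)
		return -ETIMEDOUT;
	if (sts & AUTO_CMD_CRC_ERR)
		return -EILSEQ;
	return 0;
}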
+
 		/*
 		 * If this command initiates a data phase and a response
 		 * CRC error is signalled, the card can start transferring
@@ -2581,6 +2835,7 @@
 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
 {
 	u32 command;
+	bool pr_msg = false;
 
 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
 	if (intmask & SDHCI_INT_DATA_AVAIL) {
@@ -2621,6 +2876,9 @@
 				sdhci_finish_mrq(host, data_cmd->mrq);
 				return;
 			}
+			if (host->quirks2 &
+				SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD)
+				return;
 		}
 
 		/*
@@ -2653,10 +2911,25 @@
 		if (host->ops->adma_workaround)
 			host->ops->adma_workaround(host, intmask);
 	}
-
-	if (host->data->error)
+	if (host->data->error) {
+		if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT)) {
+			command = SDHCI_GET_CMD(sdhci_readw(host,
+							    SDHCI_COMMAND));
+			if ((command != MMC_SEND_TUNING_BLOCK_HS200) &&
+			    (command != MMC_SEND_TUNING_BLOCK))
+				pr_msg = true;
+		} else {
+			pr_msg = true;
+		}
+		if (pr_msg) {
+			pr_err("%s: data txfr (0x%08x) error: %d after %lld ms\n",
+			       mmc_hostname(host->mmc), intmask,
+			       host->data->error, ktime_to_ms(ktime_sub(
+			       ktime_get(), host->data_start_time)));
+			sdhci_dumpregs(host);
+		}
 		sdhci_finish_data(host);
-	else {
+	} else {
 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
 			sdhci_transfer_pio(host);
 
@@ -2723,6 +2996,9 @@
 	}
 
 	do {
+		if (intmask & SDHCI_INT_AUTO_CMD_ERR)
+			host->auto_cmd_err_sts = sdhci_readw(host,
+					SDHCI_AUTO_CMD_ERR);
 		/* Clear selected interrupts. */
 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
 				  SDHCI_INT_BUS_POWER);
@@ -3052,6 +3328,8 @@
 
 	host->flags = SDHCI_SIGNALING_330;
 
+	spin_lock_init(&host->lock);
+
 	return host;
 }
 
@@ -3542,8 +3820,6 @@
 	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
 		host->flags |= SDHCI_SIGNALING_120;
 
-	spin_lock_init(&host->lock);
-
 	/*
 	 * Maximum number of segments. Depends on if the hardware
 	 * can do scatter/gather or not.
@@ -3656,14 +3932,33 @@
 
 	mmiowb();
 
-	if (host->cpu_dma_latency_us)
+	if (host->cpu_dma_latency_us) {
+		host->pm_qos_timeout_us = 10000; /* default value */
 		pm_qos_add_request(&host->pm_qos_req_dma,
 				PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
 
+		host->pm_qos_tout.show = show_sdhci_pm_qos_tout;
+		host->pm_qos_tout.store = store_sdhci_pm_qos_tout;
+		sysfs_attr_init(&host->pm_qos_tout.attr);
+		host->pm_qos_tout.attr.name = "pm_qos_unvote_delay";
+		host->pm_qos_tout.attr.mode = S_IRUGO | S_IWUSR;
+		ret = device_create_file(mmc_dev(mmc), &host->pm_qos_tout);
+		if (ret)
+			pr_err("%s: cannot create pm_qos_unvote_delay %d\n",
+					mmc_hostname(mmc), ret);
+
+	}
+
 	ret = mmc_add_host(mmc);
 	if (ret)
 		goto unled;
 
+	if (host->quirks2 & SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR) {
+		host->ier = (host->ier & ~SDHCI_INT_DATA_END_BIT);
+		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
+		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+	}
+
 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
 		(host->flags & SDHCI_USE_ADMA) ?
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index a8d4cfa..5e809d4 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -142,14 +142,16 @@
 #define  SDHCI_INT_DATA_CRC	0x00200000
 #define  SDHCI_INT_DATA_END_BIT	0x00400000
 #define  SDHCI_INT_BUS_POWER	0x00800000
-#define  SDHCI_INT_ACMD12ERR	0x01000000
+#define  SDHCI_INT_AUTO_CMD_ERR	0x01000000
 #define  SDHCI_INT_ADMA_ERROR	0x02000000
 
 #define  SDHCI_INT_NORMAL_MASK	0x00007FFF
 #define  SDHCI_INT_ERROR_MASK	0xFFFF8000
 
 #define  SDHCI_INT_CMD_MASK	(SDHCI_INT_RESPONSE | SDHCI_INT_TIMEOUT | \
-		SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX)
+		SDHCI_INT_CRC | SDHCI_INT_END_BIT | SDHCI_INT_INDEX | \
+		SDHCI_INT_AUTO_CMD_ERR)
+
 #define  SDHCI_INT_DATA_MASK	(SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \
 		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \
 		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \
@@ -157,7 +159,13 @@
 		SDHCI_INT_BLK_GAP)
 #define SDHCI_INT_ALL_MASK	((unsigned int)-1)
 
-#define SDHCI_ACMD12_ERR	0x3C
+#define SDHCI_AUTO_CMD_ERR		0x3C
+#define SDHCI_AUTO_CMD12_NOT_EXEC	0x0001
+#define SDHCI_AUTO_CMD_TIMEOUT_ERR	0x0002
+#define SDHCI_AUTO_CMD_CRC_ERR		0x0004
+#define SDHCI_AUTO_CMD_ENDBIT_ERR	0x0008
+#define SDHCI_AUTO_CMD_INDEX_ERR	0x0010
+#define SDHCI_AUTO_CMD12_NOT_ISSUED	0x0080
 
 #define SDHCI_HOST_CONTROL2		0x3E
 #define  SDHCI_CTRL_UHS_MASK		0x0007
@@ -329,6 +337,11 @@
 	COOKIE_MAPPED,		/* mapped by sdhci_prepare_data() */
 };
 
+enum sdhci_power_policy {
+	SDHCI_PERFORMANCE_MODE,
+	SDHCI_POWER_SAVE_MODE,
+};
+
 struct sdhci_host {
 	/* Data set by hardware interface driver */
 	const char *hw_name;	/* Hardware bus name */
@@ -438,6 +451,47 @@
 */
 #define SDHCI_QUIRK2_SLOW_INT_CLR			(1<<18)
 
+#define SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK		(1<<19)
+
+/*
+ * Ignore data timeout errors for R1B commands as there will be no
+ * data associated and the busy timeout value for these commands
+ * could be larger than the maximum timeout value that the controller
+ * can handle.
+ */
+#define SDHCI_QUIRK2_IGNORE_DATATOUT_FOR_R1BCMD		(1<<21)
+
+/*
+ * The preset value registers are not properly initialized by
+ * some hardware and hence preset value must not be enabled for
+ * such controllers.
+ */
+#define SDHCI_QUIRK2_BROKEN_PRESET_VALUE		(1<<22)
+/*
+ * Some controllers allow the use of 0xF in the data timeout counter
+ * register (0x2E), which is actually a reserved value as per the
+ * specification.
+ */
+#define SDHCI_QUIRK2_USE_RESERVED_MAX_TIMEOUT		(1<<23)
+/*
+ * This is applicable for controllers that advertise a timeout clock
+ * value in the capabilities register (bits 5-0) of just 50MHz whereas
+ * the base clock frequency is 200MHz. The controller internally
+ * multiplies the value in the timeout control register by 4 with the
+ * assumption that the driver always uses the fixed timeout clock value
+ * from the capabilities register to calculate the timeout. But when the
+ * driver uses SDHCI_QUIRK2_ALWAYS_USE_BASE_CLOCK, the base clock
+ * frequency is controlled directly by the driver and its rate varies up
+ * to a maximum of 200MHz. This quirk is used in such cases to avoid the
+ * controller's internal multiplication when the timeout is calculated
+ * from the base clock.
+ */
+#define SDHCI_QUIRK2_DIVIDE_TOUT_BY_4			(1 << 25)
+
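[Editor's note: to see where the factor of four comes from, a standalone illustration using the SD Host Controller spec encoding timeout = 2^(13 + count) / f_timeout_clk; clock figures are taken from the comment above, and this is not driver code:]

#include <stdio.h>

int main(void)
{
	unsigned int count = 10;	/* example timeout register value */
	double assumed_hz = 50e6;	/* timeout clock from capabilities */
	double actual_hz = 200e6;	/* base clock, per the quirk above */
	double ticks = (double)(1u << (13 + count));
	double assumed_s = ticks / assumed_hz;
	double actual_s = ticks / actual_hz;

	/* Prints ratio 4: the programmed timeout expires 4x early unless
	 * the controller's internal multiplication is compensated for. */
	printf("assumed %.3fs actual %.3fs ratio %.0f\n",
	       assumed_s, actual_s, assumed_s / actual_s);
	return 0;
}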
+/*
+ * Some SDHC controllers are unable to handle data-end bit errors in
+ * SDIO 1-bit mode.
+ */
+#define SDHCI_QUIRK2_IGN_DATA_END_BIT_ERROR		(1<<24)
+
 	int irq;		/* Device IRQ */
 	void __iomem *ioaddr;	/* Mapped address */
 
@@ -503,6 +557,10 @@
 	size_t adma_table_sz;	/* ADMA descriptor table size */
 	size_t align_buffer_sz;	/* Bounce buffer size */
 
+	unsigned int adma_desc_sz; /* ADMA descriptor table size */
+	unsigned int align_buf_sz; /* Bounce buffer size */
+	unsigned int adma_max_desc; /* Max ADMA descriptors (max sg segments) */
+
 	dma_addr_t adma_addr;	/* Mapped ADMA descr. table */
 	dma_addr_t align_addr;	/* Mapped bounce buffer */
 
@@ -540,6 +598,14 @@
 
 	unsigned int		cpu_dma_latency_us;
 	struct pm_qos_request	pm_qos_req_dma;
+	ktime_t data_start_time;
+
+	unsigned int pm_qos_timeout_us;         /* timeout for PM QoS request */
+	struct device_attribute pm_qos_tout;
+
+	enum sdhci_power_policy power_policy;
+
+	u32 auto_cmd_err_sts;
 
 	unsigned long private[0] ____cacheline_aligned;
 };
@@ -575,10 +641,19 @@
 	void	(*hw_reset)(struct sdhci_host *host);
 	void    (*adma_workaround)(struct sdhci_host *host, u32 intmask);
 	unsigned int	(*get_max_segments)(void);
+#define REQ_BUS_OFF	(1 << 0)
+#define REQ_BUS_ON	(1 << 1)
+#define REQ_IO_LOW	(1 << 2)
+#define REQ_IO_HIGH	(1 << 3)
 	void    (*card_event)(struct sdhci_host *host);
 	void	(*platform_bus_voting)(struct sdhci_host *host, u32 enable);
 	void	(*toggle_cdr)(struct sdhci_host *host, bool enable);
-	void	(*check_power_status)(struct sdhci_host *host);
+	void	(*check_power_status)(struct sdhci_host *host, u32 req_type);
+	int	(*config_auto_tuning_cmd)(struct sdhci_host *host,
+					  bool enable,
+					  u32 type);
+	int	(*enable_controller_clock)(struct sdhci_host *host);
+	void	(*dump_vendor_regs)(struct sdhci_host *host);
 	void	(*voltage_switch)(struct sdhci_host *host);
 	int	(*select_drive_strength)(struct sdhci_host *host,
 					 struct mmc_card *card,
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 34764de..1e45c73 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -5577,7 +5577,7 @@
 				handle_aer_irq(irq, data);
 				break;
 			default:
-				PCIE_ERR(dev,
+				PCIE_DUMP(dev,
 					"PCIe: RC%d: Unexpected event %d is caught!\n",
 					dev->rc_idx, i);
 			}
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index f6fa78f..46dc148 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -278,6 +278,7 @@
 	POWER_SUPPLY_ATTR(dp_dm),
 	POWER_SUPPLY_ATTR(input_current_limited),
 	POWER_SUPPLY_ATTR(input_current_now),
+	POWER_SUPPLY_ATTR(charge_qnovo_enable),
 	POWER_SUPPLY_ATTR(current_qnovo),
 	POWER_SUPPLY_ATTR(voltage_qnovo),
 	POWER_SUPPLY_ATTR(rerun_aicl),
@@ -306,6 +307,7 @@
 	POWER_SUPPLY_ATTR(die_health),
 	POWER_SUPPLY_ATTR(connector_health),
 	POWER_SUPPLY_ATTR(ctm_current_max),
+	POWER_SUPPLY_ATTR(hw_current_max),
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_ATTR(charge_counter_ext),
 	/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 3659b92..b985ecd 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -13,6 +13,7 @@
 #define pr_fmt(fmt) "QCOM-BATT: %s: " fmt, __func__
 
 #include <linux/device.h>
+#include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/regmap.h>
@@ -24,7 +25,7 @@
 #include <linux/printk.h>
 #include <linux/pm_wakeup.h>
 #include <linux/slab.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define DRV_MAJOR_VERSION	1
 #define DRV_MINOR_VERSION	0
@@ -36,6 +37,7 @@
 #define PL_HW_ABSENT_VOTER		"PL_HW_ABSENT_VOTER"
 #define PL_VOTER			"PL_VOTER"
 #define RESTRICT_CHG_VOTER		"RESTRICT_CHG_VOTER"
+#define ICL_CHANGE_VOTER		"ICL_CHANGE_VOTER"
 
 struct pl_data {
 	int			pl_mode;
@@ -49,14 +51,16 @@
 	struct votable		*pl_disable_votable;
 	struct votable		*pl_awake_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*usb_icl_votable;
 	struct work_struct	status_change_work;
 	struct work_struct	pl_disable_forever_work;
 	struct delayed_work	pl_taper_work;
 	struct power_supply	*main_psy;
 	struct power_supply	*pl_psy;
 	struct power_supply	*batt_psy;
+	struct power_supply	*usb_psy;
 	int			charge_type;
-	int			main_settled_ua;
+	int			total_settled_ua;
 	int			pl_settled_ua;
 	struct class		qcom_batt_class;
 	struct wakeup_source	*pl_ws;
@@ -92,15 +96,10 @@
  ********/
 static void split_settled(struct pl_data *chip)
 {
-	int slave_icl_pct;
+	int slave_icl_pct, total_current_ua;
 	int slave_ua = 0, main_settled_ua = 0;
 	union power_supply_propval pval = {0, };
-	int rc;
-
-	/* TODO some parallel chargers do not have a fine ICL resolution. For
-	 * them implement a psy interface which returns the closest lower ICL
-	 * for desired split
-	 */
+	int rc, total_settled_ua = 0;
 
 	if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
 		&& (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
@@ -122,12 +121,31 @@
 		slave_icl_pct = max(0, chip->slave_pct - 10);
 		slave_ua = ((main_settled_ua + chip->pl_settled_ua)
 						* slave_icl_pct) / 100;
+		total_settled_ua = main_settled_ua + chip->pl_settled_ua;
 	}
 
-	/* ICL_REDUCTION on main could be 0mA when pl is disabled */
-	pval.intval = slave_ua;
+	total_current_ua = get_effective_result_locked(chip->usb_icl_votable);
+	if (total_current_ua < 0) {
+		if (!chip->usb_psy)
+			chip->usb_psy = power_supply_get_by_name("usb");
+		if (!chip->usb_psy) {
+			pr_err("Couldn't get usbpsy while splitting settled\n");
+			return;
+		}
+		/* no client is voting, so get the total current from charger */
+		rc = power_supply_get_property(chip->usb_psy,
+			POWER_SUPPLY_PROP_HW_CURRENT_MAX, &pval);
+		if (rc < 0) {
+			pr_err("Couldn't get max current rc=%d\n", rc);
+			return;
+		}
+		total_current_ua = pval.intval;
+	}
+
+	pval.intval = total_current_ua - slave_ua;
+	/* Set ICL on main charger */
 	rc = power_supply_set_property(chip->main_psy,
-			POWER_SUPPLY_PROP_ICL_REDUCTION, &pval);
+				POWER_SUPPLY_PROP_CURRENT_MAX, &pval);
 	if (rc < 0) {
 		pr_err("Couldn't change slave suspend state rc=%d\n", rc);
 		return;
@@ -142,10 +160,12 @@
 		return;
 	}
 
-	/* main_settled_ua represents the total capability of adapter */
-	if (!chip->main_settled_ua)
-		chip->main_settled_ua = main_settled_ua;
+	chip->total_settled_ua = total_settled_ua;
 	chip->pl_settled_ua = slave_ua;
+
+	pl_dbg(chip, PR_PARALLEL,
+		"Split total_current_ua=%d main_settled_ua=%d slave_ua=%d\n",
+		total_current_ua, main_settled_ua, slave_ua);
 }
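[Editor's note: a worked standalone model of the USBIN-USBIN split above. The example currents are arbitrary; the ten-point reduction mirrors max(0, chip->slave_pct - 10) in the code, presumably a guard band so the slave does not trip its own input limit:]

#include <stdio.h>

int main(void)
{
	int main_settled_ua = 1500000;	/* AICL result on main, uA */
	int pl_settled_ua = 500000;	/* previous slave allotment, uA */
	int slave_pct = 50;		/* requested slave share */
	int total_current_ua = 2000000;	/* effective USB_ICL vote */

	/* Ten-point reduction, as in max(0, slave_pct - 10) above. */
	int slave_icl_pct = slave_pct > 10 ? slave_pct - 10 : 0;
	int slave_ua = (main_settled_ua + pl_settled_ua) * slave_icl_pct / 100;
	int main_ua = total_current_ua - slave_ua;

	printf("slave=%duA main=%duA\n", slave_ua, main_ua);	/* 800000/1200000 */
	return 0;
}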
 
 static ssize_t version_show(struct class *c, struct class_attribute *attr,
@@ -213,6 +233,10 @@
 
 	chip->restricted_charging_enabled = !!val;
 
+	/* disable parallel charger in case of restricted charging */
+	vote(chip->pl_disable_votable, RESTRICT_CHG_VOTER,
+				chip->restricted_charging_enabled, 0);
+
 	vote(chip->fcc_votable, RESTRICT_CHG_VOTER,
 				chip->restricted_charging_enabled,
 				chip->restricted_current);
@@ -487,6 +511,59 @@
 	return 0;
 }
 
+#define ICL_STEP_UA	25000
+static int usb_icl_vote_callback(struct votable *votable, void *data,
+			int icl_ua, const char *client)
+{
+	int rc;
+	struct pl_data *chip = data;
+	union power_supply_propval pval = {0, };
+
+	if (!chip->main_psy)
+		return 0;
+
+	if (client == NULL)
+		icl_ua = INT_MAX;
+
+	/*
+	 * Disable parallel for new ICL vote - the call to split_settled will
+	 * ensure that all the input current limit gets assigned to the main
+	 * charger.
+	 */
+	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, true, 0);
+
+	/* rerun AICL */
+	/* get the settled current */
+	rc = power_supply_get_property(chip->main_psy,
+			       POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
+			       &pval);
+	if (rc < 0) {
+		pr_err("Couldn't get aicl settled value rc=%d\n", rc);
+		return rc;
+	}
+
+	/* rerun AICL if new ICL is above settled ICL */
+	if (icl_ua > pval.intval) {
+		/* set a lower ICL */
+		pval.intval = max(pval.intval - ICL_STEP_UA, ICL_STEP_UA);
+		power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX,
+				&pval);
+		/* wait for ICL change */
+		msleep(100);
+
+		pval.intval = icl_ua;
+		power_supply_set_property(chip->main_psy,
+				POWER_SUPPLY_PROP_CURRENT_MAX,
+				&pval);
+		/* wait for ICL change */
+		msleep(100);
+	}
+	vote(chip->pl_disable_votable, ICL_CHANGE_VOTER, false, 0);
+
+	return 0;
+}
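[Editor's note: in short, if the new limit exceeds what AICL last settled at, the callback first programs one step below the settled value to force a rerun, then programs the new limit (the msleep(100) calls give AICL time to settle). A minimal standalone model of the first write, with the step size from ICL_STEP_UA above:]

#define ICL_STEP_UA	25000

static int max_int(int a, int b) { return a > b ? a : b; }

/*
 * Value programmed to kick off an AICL rerun before the real limit is
 * applied; never goes below one step so the input is not fully starved.
 */
static int aicl_kick_ua(int settled_ua)
{
	return max_int(settled_ua - ICL_STEP_UA, ICL_STEP_UA);
}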
+
 static void pl_disable_forever_work(struct work_struct *work)
 {
 	struct pl_data *chip = container_of(work,
@@ -508,7 +585,7 @@
 	int rc;
 
 	chip->taper_pct = 100;
-	chip->main_settled_ua = 0;
+	chip->total_settled_ua = 0;
 	chip->pl_settled_ua = 0;
 
 	if (!pl_disable) { /* enable */
@@ -596,13 +673,15 @@
 
 static bool is_main_available(struct pl_data *chip)
 {
-	if (!chip->main_psy)
-		chip->main_psy = power_supply_get_by_name("main");
+	if (chip->main_psy)
+		return true;
 
-	if (!chip->main_psy)
-		return false;
+	chip->main_psy = power_supply_get_by_name("main");
 
-	return true;
+	if (chip->main_psy)
+		rerun_election(chip->usb_icl_votable);
+
+	return !!chip->main_psy;
 }
 
 static bool is_batt_available(struct pl_data *chip)
@@ -711,6 +790,7 @@
 static void handle_settled_icl_change(struct pl_data *chip)
 {
 	union power_supply_propval pval = {0, };
+	int new_total_settled_ua;
 	int rc;
 
 	if (get_effective_result(chip->pl_disable_votable))
@@ -730,9 +810,15 @@
 			return;
 		}
 
+		new_total_settled_ua = pval.intval + chip->pl_settled_ua;
+		pl_dbg(chip, PR_PARALLEL,
+			"total_settled_ua=%d settled_ua=%d new_total_settled_ua=%d\n",
+			chip->total_settled_ua, pval.intval,
+			new_total_settled_ua);
+
 		/* If ICL change is small skip splitting */
-		if (abs((chip->main_settled_ua - chip->pl_settled_ua)
-				- pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
+		if (abs(new_total_settled_ua - chip->total_settled_ua)
+						> MIN_ICL_CHANGE_DELTA_UA)
 			split_settled(chip);
 	} else {
 		rerun_election(chip->fcc_votable);
@@ -855,6 +941,14 @@
 		goto destroy_votable;
 	}
 
+	chip->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
+					usb_icl_vote_callback,
+					chip);
+	if (IS_ERR(chip->usb_icl_votable)) {
+		rc = PTR_ERR(chip->usb_icl_votable);
+		goto destroy_votable;
+	}
+
 	chip->pl_disable_votable = create_votable("PL_DISABLE", VOTE_SET_ANY,
 					pl_disable_vote_callback,
 					chip);
@@ -909,6 +1003,7 @@
 	destroy_votable(chip->pl_disable_votable);
 	destroy_votable(chip->fv_votable);
 	destroy_votable(chip->fcc_votable);
+	destroy_votable(chip->usb_icl_votable);
 release_wakeup_source:
 	wakeup_source_unregister(chip->pl_ws);
 cleanup:
diff --git a/drivers/power/supply/qcom/fg-core.h b/drivers/power/supply/qcom/fg-core.h
index c0ba5a9..48fe04f 100644
--- a/drivers/power/supply/qcom/fg-core.h
+++ b/drivers/power/supply/qcom/fg-core.h
@@ -29,7 +29,7 @@
 #include <linux/string_helpers.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define fg_dbg(chip, reason, fmt, ...)			\
 	do {							\
@@ -46,10 +46,13 @@
 			&& (value) <= (right)))
 
 /* Awake votable reasons */
-#define SRAM_READ	"fg_sram_read"
-#define SRAM_WRITE	"fg_sram_write"
-#define PROFILE_LOAD	"fg_profile_load"
-#define DELTA_SOC	"fg_delta_soc"
+#define SRAM_READ		"fg_sram_read"
+#define SRAM_WRITE		"fg_sram_write"
+#define PROFILE_LOAD		"fg_profile_load"
+#define DELTA_SOC		"fg_delta_soc"
+
+/* Delta BSOC votable reasons */
+#define DELTA_BSOC_IRQ_VOTER	"fg_delta_bsoc_irq"
 
 #define DEBUG_PRINT_BUFFER_SIZE		64
 /* 3 byte address + 1 space character */
@@ -159,6 +162,7 @@
 	FG_SRAM_ESR_TIMER_DISCHG_INIT,
 	FG_SRAM_ESR_TIMER_CHG_MAX,
 	FG_SRAM_ESR_TIMER_CHG_INIT,
+	FG_SRAM_ESR_PULSE_THRESH,
 	FG_SRAM_SYS_TERM_CURR,
 	FG_SRAM_CHG_TERM_CURR,
 	FG_SRAM_DELTA_MSOC_THR,
@@ -250,6 +254,8 @@
 	int	esr_tight_lt_flt_upct;
 	int	esr_broad_lt_flt_upct;
 	int	slope_limit_temp;
+	int	esr_pulse_thresh_ma;
+	int	esr_meas_curr_ma;
 	int	jeita_thresholds[NUM_JEITA_LEVELS];
 	int	ki_coeff_soc[KI_COEFF_SOC_LEVELS];
 	int	ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
@@ -330,6 +336,7 @@
 	struct fg_memif		*sram;
 	struct fg_irq_info	*irqs;
 	struct votable		*awake_votable;
+	struct votable		*delta_bsoc_irq_en_votable;
 	struct fg_sram_param	*sp;
 	struct fg_alg_flag	*alg_flags;
 	int			*debug_mask;
@@ -370,8 +377,8 @@
 	bool			esr_fcc_ctrl_en;
 	bool			soc_reporting_ready;
 	bool			esr_flt_cold_temp_en;
-	bool			bsoc_delta_irq_en;
 	bool			slope_limit_en;
+	bool			use_ima_single_mode;
 	struct completion	soc_update;
 	struct completion	soc_ready;
 	struct delayed_work	profile_load_work;
diff --git a/drivers/power/supply/qcom/fg-memif.c b/drivers/power/supply/qcom/fg-memif.c
index 2dc7618..8a949bf 100644
--- a/drivers/power/supply/qcom/fg-memif.c
+++ b/drivers/power/supply/qcom/fg-memif.c
@@ -48,6 +48,10 @@
 	int rc;
 	u8 intf_ctl = 0;
 
+	fg_dbg(chip, FG_SRAM_READ | FG_SRAM_WRITE, "access: %d burst: %d\n",
+		access, burst);
+
+	WARN_ON(burst && chip->use_ima_single_mode);
 	intf_ctl = ((access == FG_WRITE) ? IMA_WR_EN_BIT : 0) |
 			(burst ? MEM_ACS_BURST_BIT : 0);
 
@@ -175,6 +179,7 @@
 {
 	int rc;
 	u8 dma_sts;
+	bool error_present;
 
 	rc = fg_read(chip, MEM_IF_DMA_STS(chip), &dma_sts, 1);
 	if (rc < 0) {
@@ -184,14 +189,13 @@
 	}
 	fg_dbg(chip, FG_STATUS, "dma_sts: %x\n", dma_sts);
 
-	if (dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT)) {
-		rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip),
-				DMA_CLEAR_LOG_BIT, DMA_CLEAR_LOG_BIT);
-		if (rc < 0) {
-			pr_err("failed to write addr=0x%04x, rc=%d\n",
-				MEM_IF_DMA_CTL(chip), rc);
-			return rc;
-		}
+	error_present = dma_sts & (DMA_WRITE_ERROR_BIT | DMA_READ_ERROR_BIT);
+	rc = fg_masked_write(chip, MEM_IF_DMA_CTL(chip), DMA_CLEAR_LOG_BIT,
+			error_present ? DMA_CLEAR_LOG_BIT : 0);
+	if (rc < 0) {
+		pr_err("failed to write addr=0x%04x, rc=%d\n",
+			MEM_IF_DMA_CTL(chip), rc);
+		return rc;
 	}
 
 	return 0;
@@ -293,7 +297,9 @@
 		/* check for error condition */
 		rc = fg_clear_ima_errors_if_any(chip, false);
 		if (rc < 0) {
-			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			if (rc != -EAGAIN)
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
 			return rc;
 		}
 
@@ -357,7 +363,12 @@
 		/* check for error condition */
 		rc = fg_clear_ima_errors_if_any(chip, false);
 		if (rc < 0) {
-			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			if (rc == -EAGAIN)
+				pr_err("IMA error cleared, address [%d %d] len %d\n",
+					address, offset, len);
+			else
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
 			return rc;
 		}
 
@@ -365,6 +376,15 @@
 		len -= num_bytes;
 		offset = byte_enable = 0;
 
+		if (chip->use_ima_single_mode && len) {
+			address++;
+			rc = fg_set_address(chip, address);
+			if (rc < 0) {
+				pr_err("failed to set address rc = %d\n", rc);
+				return rc;
+			}
+		}
+
 		rc = fg_check_iacs_ready(chip);
 		if (rc < 0) {
 			pr_debug("IACS_RDY failed rc=%d\n", rc);
@@ -403,22 +423,40 @@
 		/* check for error condition */
 		rc = fg_clear_ima_errors_if_any(chip, false);
 		if (rc < 0) {
-			pr_err("Failed to check for ima errors rc=%d\n", rc);
+			if (rc == -EAGAIN)
+				pr_err("IMA error cleared, address [%d %d] len %d\n",
+					address, offset, len);
+			else
+				pr_err("Failed to check for ima errors rc=%d\n",
+					rc);
 			return rc;
 		}
 
-		if (len && len < BYTES_PER_SRAM_WORD) {
-			/*
-			 * Move to single mode. Changing address is not
-			 * required here as it must be in burst mode. Address
-			 * will get incremented internally by FG HW once the MSB
-			 * of RD_DATA is read.
-			 */
-			rc = fg_config_access_mode(chip, FG_READ, 0);
-			if (rc < 0) {
-				pr_err("failed to move to single mode rc=%d\n",
-					rc);
-				return -EIO;
+		if (chip->use_ima_single_mode) {
+			if (len) {
+				address++;
+				rc = fg_set_address(chip, address);
+				if (rc < 0) {
+					pr_err("failed to set address rc = %d\n",
+						rc);
+					return rc;
+				}
+			}
+		} else {
+			if (len && len < BYTES_PER_SRAM_WORD) {
+				/*
+				 * Move to single mode. Changing address is not
+				 * required here as it must be in burst mode.
+				 * Address will get incremented internally by FG
+				 * HW once the MSB of RD_DATA is read.
+				 */
+				rc = fg_config_access_mode(chip, FG_READ,
+								false);
+				if (rc < 0) {
+					pr_err("failed to move to single mode rc=%d\n",
+						rc);
+					return -EIO;
+				}
 			}
 		}
 
@@ -489,6 +527,7 @@
 		u16 address, int offset, int len, bool access)
 {
 	int rc = 0;
+	bool burst_mode = false;
 
 	if (!is_mem_access_available(chip, access))
 		return -EBUSY;
@@ -503,7 +542,8 @@
 	}
 
 	/* configure for the read/write, single/burst mode */
-	rc = fg_config_access_mode(chip, access, (offset + len) > 4);
+	burst_mode = chip->use_ima_single_mode ? false : ((offset + len) > 4);
+	rc = fg_config_access_mode(chip, access, burst_mode);
 	if (rc < 0) {
 		pr_err("failed to set memory access rc = %d\n", rc);
 		return rc;
@@ -583,7 +623,7 @@
 	if (rc < 0) {
 		count++;
 		if (rc == -EAGAIN) {
-			pr_err("IMA access failed retry_count = %d\n", count);
+			pr_err("IMA read failed retry_count = %d\n", count);
 			goto retry;
 		}
 		pr_err("failed to read SRAM address rc = %d\n", rc);
@@ -667,8 +707,8 @@
 	rc = __fg_interleaved_mem_write(chip, address, offset, val, len);
 	if (rc < 0) {
 		count++;
-		if ((rc == -EAGAIN) && (count < RETRY_COUNT)) {
-			pr_err("IMA access failed retry_count = %d\n", count);
+		if (rc == -EAGAIN) {
+			pr_err("IMA write failed retry_count = %d\n", count);
 			goto retry;
 		}
 		pr_err("failed to write SRAM address rc = %d\n", rc);
diff --git a/drivers/power/supply/qcom/fg-reg.h b/drivers/power/supply/qcom/fg-reg.h
index bf2827f..cd0b2fb 100644
--- a/drivers/power/supply/qcom/fg-reg.h
+++ b/drivers/power/supply/qcom/fg-reg.h
@@ -167,6 +167,7 @@
 
 /* BATT_INFO_ESR_PULL_DN_CFG */
 #define ESR_PULL_DOWN_IVAL_MASK			GENMASK(3, 2)
+#define ESR_PULL_DOWN_IVAL_SHIFT		2
 #define ESR_MEAS_CUR_60MA			0x0
 #define ESR_MEAS_CUR_120MA			0x1
 #define ESR_MEAS_CUR_180MA			0x2
diff --git a/drivers/power/supply/qcom/pmic-voter.c b/drivers/power/supply/qcom/pmic-voter.c
index 39a0dcb6..10a1c54 100644
--- a/drivers/power/supply/qcom/pmic-voter.c
+++ b/drivers/power/supply/qcom/pmic-voter.c
@@ -18,9 +18,9 @@
 #include <linux/slab.h>
 #include <linux/string.h>
 
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
-#define NUM_MAX_CLIENTS	8
+#define NUM_MAX_CLIENTS		16
 #define DEBUG_FORCE_CLIENT	"DEBUG_FORCE_CLIENT"
 
 static DEFINE_SPINLOCK(votable_list_slock);
@@ -188,6 +188,38 @@
 }
 
 /**
+ * is_client_vote_enabled() -
+ * is_client_vote_enabled_locked() -
+ *		The unlocked and locked variants of getting whether a client's
+ *		vote is enabled.
+ * @votable:	the votable object
+ * @client_str: client of interest
+ *
+ * Returns:
+ *	True if the client's vote is enabled; false otherwise.
+ */
+bool is_client_vote_enabled_locked(struct votable *votable,
+							const char *client_str)
+{
+	int client_id = get_client_id(votable, client_str);
+
+	if (client_id < 0)
+		return false;
+
+	return votable->votes[client_id].enabled;
+}
+
+bool is_client_vote_enabled(struct votable *votable, const char *client_str)
+{
+	bool enabled;
+
+	lock_votable(votable);
+	enabled = is_client_vote_enabled_locked(votable, client_str);
+	unlock_votable(votable);
+	return enabled;
+}
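[Editor's note: a hedged usage sketch of the new helper next to the existing vote() API; the votable handle is assumed to come from create_votable(), and the client name is illustrative rather than taken from this patch:]

/* Illustrative only: assumes "votable" was obtained via create_votable(). */
vote(votable, "USER_VOTER", true, 0);	/* cast an enabled vote */
if (is_client_vote_enabled(votable, "USER_VOTER"))
	pr_info("USER_VOTER vote is currently enabled\n");
vote(votable, "USER_VOTER", false, 0);	/* withdraw the vote */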
+
+/**
  * get_client_vote() -
  * get_client_vote_locked() -
  *		The unlocked and locked variants of getting a client's voted
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index 304d0cf..7ab5b31 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -31,6 +31,8 @@
 #define FG_MEM_INFO_PMI8998		0x0D
 
 /* SRAM address and offset in ascending order */
+#define ESR_PULSE_THRESH_WORD		2
+#define ESR_PULSE_THRESH_OFFSET		3
 #define SLOPE_LIMIT_WORD		3
 #define SLOPE_LIMIT_OFFSET		0
 #define CUTOFF_VOLT_WORD		5
@@ -216,6 +218,8 @@
 		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
 	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
 		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+		1, 100000, 390625, 0, fg_encode_default, NULL),
 	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
 		KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
 		fg_encode_default, NULL),
@@ -286,6 +290,8 @@
 		ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
 	PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
 		ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+	PARAM(ESR_PULSE_THRESH, ESR_PULSE_THRESH_WORD, ESR_PULSE_THRESH_OFFSET,
+		1, 100000, 390625, 0, fg_encode_default, NULL),
 	PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
 		KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
 		fg_encode_default, NULL),
@@ -525,7 +531,7 @@
 }
 
 #define CC_SOC_30BIT	GENMASK(29, 0)
-static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+static int fg_get_charge_raw(struct fg_chip *chip, int *val)
 {
 	int rc, cc_soc;
 
@@ -539,7 +545,7 @@
 	return 0;
 }
 
-static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+static int fg_get_charge_counter(struct fg_chip *chip, int *val)
 {
 	int rc, cc_soc;
 
@@ -981,6 +987,29 @@
 	};
 }
 
+static inline void get_esr_meas_current(int curr_ma, u8 *val)
+{
+	switch (curr_ma) {
+	case 60:
+		*val = ESR_MEAS_CUR_60MA;
+		break;
+	case 120:
+		*val = ESR_MEAS_CUR_120MA;
+		break;
+	case 180:
+		*val = ESR_MEAS_CUR_180MA;
+		break;
+	case 240:
+		*val = ESR_MEAS_CUR_240MA;
+		break;
+	default:
+		*val = ESR_MEAS_CUR_120MA;
+		break;
+	};
+
+	*val <<= ESR_PULL_DOWN_IVAL_SHIFT;
+}
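[Editor's note: a standalone model of the mapping above. The two-bit field sits at bits 3:2 (ESR_PULL_DOWN_IVAL_MASK), hence the shift by ESR_PULL_DOWN_IVAL_SHIFT; codes follow the ESR_MEAS_CUR_* values in fg-reg.h:]

#include <stdint.h>
#include <stdio.h>

#define IVAL_SHIFT	2
#define IVAL_MASK	(0x3u << IVAL_SHIFT)	/* GENMASK(3, 2) */

static uint8_t esr_meas_field(int curr_ma)
{
	uint8_t code;

	switch (curr_ma) {
	case 60:  code = 0x0; break;
	case 120: code = 0x1; break;
	case 180: code = 0x2; break;
	case 240: code = 0x3; break;
	default:  code = 0x1; break;	/* 120 mA default */
	}
	return (uint8_t)(code << IVAL_SHIFT) & IVAL_MASK;
}

int main(void)
{
	printf("0x%02x\n", esr_meas_field(180));	/* prints 0x08 */
	return 0;
}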
+
 static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
 				int flags)
 {
@@ -1054,6 +1083,25 @@
 	fg_dbg(chip, FG_STATUS, "Notified charger on float voltage and FCC\n");
 }
 
+static int fg_delta_bsoc_irq_en_cb(struct votable *votable, void *data,
+					int enable, const char *client)
+{
+	struct fg_chip *chip = data;
+
+	if (!chip->irqs[BSOC_DELTA_IRQ].irq)
+		return 0;
+
+	if (enable) {
+		enable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+		enable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+	} else {
+		disable_irq_wake(chip->irqs[BSOC_DELTA_IRQ].irq);
+		disable_irq(chip->irqs[BSOC_DELTA_IRQ].irq);
+	}
+
+	return 0;
+}
+
 static int fg_awake_cb(struct votable *votable, void *data, int awake,
 			const char *client)
 {
@@ -1241,7 +1289,7 @@
 		chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
 }
 
-static int  fg_cap_learning_process_full_data(struct fg_chip *chip)
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
 {
 	int rc, cc_soc_sw, cc_soc_delta_pct;
 	int64_t delta_cc_uah;
@@ -1263,30 +1311,39 @@
 	return 0;
 }
 
-static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+#define BATT_SOC_32BIT	GENMASK(31, 0)
+static int fg_cap_learning_begin(struct fg_chip *chip, u32 batt_soc)
 {
-	int rc, cc_soc_sw;
+	int rc, cc_soc_sw, batt_soc_msb;
 
-	if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+	batt_soc_msb = batt_soc >> 24;
+	if (DIV_ROUND_CLOSEST(batt_soc_msb * 100, FULL_SOC_RAW) >
 		chip->dt.cl_start_soc) {
 		fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
-			batt_soc);
+			batt_soc_msb);
 		return -EINVAL;
 	}
 
-	chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+	chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc_msb,
 					FULL_SOC_RAW);
-	rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+
+	/* Prime cc_soc_sw with battery SOC when capacity learning begins */
+	cc_soc_sw = div64_s64((int64_t)batt_soc * CC_SOC_30BIT,
+				BATT_SOC_32BIT);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+		chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+		chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
 	if (rc < 0) {
-		pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
-		return rc;
+		pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+		goto out;
 	}
 
 	chip->cl.init_cc_soc_sw = cc_soc_sw;
 	chip->cl.active = true;
 	fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
-		batt_soc, chip->cl.init_cc_soc_sw);
-	return 0;
+		batt_soc_msb, chip->cl.init_cc_soc_sw);
+out:
+	return rc;
 }
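[Editor's note: a standalone model of the cc_soc_sw priming above: the battery SOC is a 32-bit full-scale fraction, the coulomb counter uses a 30-bit full scale, so the write rescales one to the other. Example value is arbitrary:]

#include <stdint.h>
#include <stdio.h>

#define CC_SOC_30BIT	((1u << 30) - 1)	/* GENMASK(29, 0) */
#define BATT_SOC_32BIT	0xFFFFFFFFu		/* GENMASK(31, 0) */

int main(void)
{
	uint32_t batt_soc = 0x40000000u;	/* 25% of 32-bit full scale */
	int64_t cc_soc_sw = (int64_t)batt_soc * CC_SOC_30BIT / BATT_SOC_32BIT;

	/* Both encodings agree on the fraction of full capacity. */
	printf("cc_soc_sw=%lld (%.1f%% of 30-bit full scale)\n",
	       (long long)cc_soc_sw, 100.0 * (double)cc_soc_sw / CC_SOC_30BIT);
	return 0;
}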
 
 static int fg_cap_learning_done(struct fg_chip *chip)
@@ -1318,7 +1375,7 @@
 #define FULL_SOC_RAW	255
 static void fg_cap_learning_update(struct fg_chip *chip)
 {
-	int rc, batt_soc;
+	int rc, batt_soc, batt_soc_msb;
 
 	mutex_lock(&chip->cl.lock);
 
@@ -1337,11 +1394,9 @@
 		goto out;
 	}
 
-	/* We need only the most significant byte here */
-	batt_soc = (u32)batt_soc >> 24;
-
+	batt_soc_msb = (u32)batt_soc >> 24;
 	fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
-		chip->charge_status, chip->cl.active, batt_soc);
+		chip->charge_status, chip->cl.active, batt_soc_msb);
 
 	/* Initialize the starting point of learning capacity */
 	if (!chip->cl.active) {
@@ -1363,7 +1418,7 @@
 
 		if (chip->charge_status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
 			fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
-				batt_soc);
+				batt_soc_msb);
 			chip->cl.active = false;
 			chip->cl.init_cc_uah = 0;
 		}
@@ -1470,16 +1525,8 @@
 		return 0;
 
 	mutex_lock(&chip->charge_full_lock);
-	if (!chip->charge_done && chip->bsoc_delta_irq_en) {
-		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
-		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
-		chip->bsoc_delta_irq_en = false;
-	} else if (chip->charge_done && !chip->bsoc_delta_irq_en) {
-		enable_irq(fg_irqs[BSOC_DELTA_IRQ].irq);
-		enable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
-		chip->bsoc_delta_irq_en = true;
-	}
-
+	vote(chip->delta_bsoc_irq_en_votable, DELTA_BSOC_IRQ_VOTER,
+		chip->charge_done, 0);
 	rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
 		&prop);
 	if (rc < 0) {
@@ -1598,6 +1645,9 @@
 	u64 scaling_factor;
 	u32 val = 0;
 
+	if (!chip->dt.rconn_mohms)
+		return 0;
+
 	rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
 			SW_CONFIG_OFFSET, (u8 *)&val, 1, FG_IMA_DEFAULT);
 	if (rc < 0) {
@@ -1696,6 +1746,9 @@
 	if (!chip->dt.auto_recharge_soc)
 		return 0;
 
+	if (recharge_soc < 0 || recharge_soc > FULL_CAPACITY)
+		return 0;
+
 	fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, &buf);
 	rc = fg_sram_write(chip,
 			chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
@@ -1712,46 +1765,55 @@
 static int fg_adjust_recharge_soc(struct fg_chip *chip)
 {
 	int rc, msoc, recharge_soc, new_recharge_soc = 0;
+	bool recharge_soc_status;
 
 	if (!chip->dt.auto_recharge_soc)
 		return 0;
 
 	recharge_soc = chip->dt.recharge_soc_thr;
+	recharge_soc_status = chip->recharge_soc_adjusted;
 	/*
 	 * If the input is present and charging had been terminated, adjust
 	 * the recharge SOC threshold based on the monotonic SOC at which
 	 * the charge termination had happened.
 	 */
-	if (is_input_present(chip) && !chip->recharge_soc_adjusted
-		&& chip->charge_done) {
-		/* Get raw monotonic SOC for calculation */
-		rc = fg_get_msoc(chip, &msoc);
-		if (rc < 0) {
-			pr_err("Error in getting msoc, rc=%d\n", rc);
-			return rc;
-		}
+	if (is_input_present(chip)) {
+		if (chip->charge_done) {
+			if (!chip->recharge_soc_adjusted) {
+				/* Get raw monotonic SOC for calculation */
+				rc = fg_get_msoc(chip, &msoc);
+				if (rc < 0) {
+					pr_err("Error in getting msoc, rc=%d\n",
+						rc);
+					return rc;
+				}
 
-		/* Adjust the recharge_soc threshold */
-		new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
-	} else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
-				|| chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+				/* Adjust the recharge_soc threshold */
+				new_recharge_soc = msoc - (FULL_CAPACITY -
+								recharge_soc);
+				chip->recharge_soc_adjusted = true;
+			} else {
+				/* adjusted already, do nothing */
+				return 0;
+			}
+		} else {
+			/* Charging, do nothing */
+			return 0;
+		}
+	} else {
 		/* Restore the default value */
 		new_recharge_soc = recharge_soc;
+		chip->recharge_soc_adjusted = false;
 	}
 
-	if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
-		rc = fg_set_recharge_soc(chip, new_recharge_soc);
-		if (rc) {
-			pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
-			return rc;
-		}
-
-		chip->recharge_soc_adjusted = (new_recharge_soc !=
-						recharge_soc);
-		fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
-			new_recharge_soc);
+	rc = fg_set_recharge_soc(chip, new_recharge_soc);
+	if (rc < 0) {
+		chip->recharge_soc_adjusted = recharge_soc_status;
+		pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+		return rc;
 	}
 
+	fg_dbg(chip, FG_STATUS, "resume soc set to %d\n", new_recharge_soc);
 	return 0;
 }
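[Editor's note: a worked standalone model of the adjustment above: the threshold is pulled down so it keeps the same gap below the SOC at which termination actually happened, with FULL_CAPACITY = 100:]

#include <stdio.h>

#define FULL_CAPACITY	100

int main(void)
{
	int recharge_soc = 98;	/* configured threshold, in percent */
	int msoc = 95;		/* monotonic SOC at charge termination */
	int new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);

	/* Threshold keeps the same 2% gap below the actual full point. */
	printf("adjusted recharge threshold: %d%%\n", new_recharge_soc);	/* 93 */
	return 0;
}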
 
@@ -2156,6 +2218,35 @@
 	return count;
 }
 
+static int fg_bp_params_config(struct fg_chip *chip)
+{
+	int rc = 0;
+	u8 buf;
+
+	/* This SRAM register is only present in v2.0 and above */
+	if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
+					chip->bp.float_volt_uv > 0) {
+		fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+			chip->bp.float_volt_uv / 1000, &buf);
+		rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+			chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, &buf,
+			chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+		if (rc < 0) {
+			pr_err("Error in writing float_volt, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (chip->bp.vbatt_full_mv > 0) {
+		rc = fg_set_constant_chg_voltage(chip,
+				chip->bp.vbatt_full_mv * 1000);
+		if (rc < 0)
+			return rc;
+	}
+
+	return rc;
+}
+
 #define PROFILE_LOAD_BIT	BIT(0)
 #define BOOTLOADER_LOAD_BIT	BIT(1)
 #define BOOTLOADER_RESTART_BIT	BIT(2)
@@ -2176,6 +2267,17 @@
 	/* Check if integrity bit is set */
 	if (val & PROFILE_LOAD_BIT) {
 		fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
+
+		/* Whitelist the values */
+		val &= ~PROFILE_LOAD_BIT;
+		if (val != HLOS_RESTART_BIT && val != BOOTLOADER_LOAD_BIT &&
+			val != (BOOTLOADER_LOAD_BIT | BOOTLOADER_RESTART_BIT)) {
+			val |= PROFILE_LOAD_BIT;
+			pr_warn("Garbage value in profile integrity word: 0x%x\n",
+				val);
+			return true;
+		}
+
 		rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
 				buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
 		if (rc < 0) {
@@ -2323,6 +2425,11 @@
 	}
 
 done:
+	rc = fg_bp_params_config(chip);
+	if (rc < 0)
+		pr_err("Error in configuring battery profile params, rc:%d\n",
+			rc);
+
 	rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
 			FG_IMA_DEFAULT);
 	if (rc < 0) {
@@ -2806,7 +2913,7 @@
 		pval->intval = chip->cyc_ctr.id;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
-		rc = fg_get_cc_soc(chip, &pval->intval);
+		rc = fg_get_charge_raw(chip, &pval->intval);
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_NOW:
 		pval->intval = chip->cl.init_cc_uah;
@@ -2815,7 +2922,7 @@
 		pval->intval = chip->cl.learned_cc_uah;
 		break;
 	case POWER_SUPPLY_PROP_CHARGE_COUNTER:
-		rc = fg_get_cc_soc_sw(chip, &pval->intval);
+		rc = fg_get_charge_counter(chip, &pval->intval);
 		break;
 	case POWER_SUPPLY_PROP_TIME_TO_FULL_AVG:
 		rc = fg_get_time_to_full(chip, &pval->intval);
@@ -2974,27 +3081,6 @@
 		return rc;
 	}
 
-	/* This SRAM register is only present in v2.0 and above */
-	if (!(chip->wa_flags & PMI8998_V1_REV_WA) &&
-					chip->bp.float_volt_uv > 0) {
-		fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
-			chip->bp.float_volt_uv / 1000, buf);
-		rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
-			chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
-			chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
-		if (rc < 0) {
-			pr_err("Error in writing float_volt, rc=%d\n", rc);
-			return rc;
-		}
-	}
-
-	if (chip->bp.vbatt_full_mv > 0) {
-		rc = fg_set_constant_chg_voltage(chip,
-				chip->bp.vbatt_full_mv * 1000);
-		if (rc < 0)
-			return rc;
-	}
-
 	fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
 		buf);
 	rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
@@ -3164,12 +3250,10 @@
 		return rc;
 	}
 
-	if (chip->dt.rconn_mohms > 0) {
-		rc = fg_rconn_config(chip);
-		if (rc < 0) {
-			pr_err("Error in configuring Rconn, rc=%d\n", rc);
-			return rc;
-		}
+	rc = fg_rconn_config(chip);
+	if (rc < 0) {
+		pr_err("Error in configuring Rconn, rc=%d\n", rc);
+		return rc;
 	}
 
 	fg_encode(chip->sp, FG_SRAM_ESR_TIGHT_FILTER,
@@ -3192,6 +3276,24 @@
 		return rc;
 	}
 
+	fg_encode(chip->sp, FG_SRAM_ESR_PULSE_THRESH,
+		chip->dt.esr_pulse_thresh_ma, buf);
+	rc = fg_sram_write(chip, chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_word,
+			chip->sp[FG_SRAM_ESR_PULSE_THRESH].addr_byte, buf,
+			chip->sp[FG_SRAM_ESR_PULSE_THRESH].len, FG_IMA_DEFAULT);
+	if (rc < 0) {
+		pr_err("Error in writing esr_pulse_thresh_ma, rc=%d\n", rc);
+		return rc;
+	}
+
+	get_esr_meas_current(chip->dt.esr_meas_curr_ma, &val);
+	rc = fg_masked_write(chip, BATT_INFO_ESR_PULL_DN_CFG(chip),
+			ESR_PULL_DOWN_IVAL_MASK, val);
+	if (rc < 0) {
+		pr_err("Error in writing esr_meas_curr_ma, rc=%d\n", rc);
+		return rc;
+	}
+
 	return 0;
 }
 
@@ -3216,20 +3318,19 @@
 	}
 
 	fg_dbg(chip, FG_IRQ, "irq %d triggered, status:%d\n", irq, status);
-	if (status & MEM_XCP_BIT) {
-		rc = fg_clear_dma_errors_if_any(chip);
-		if (rc < 0) {
-			pr_err("Error in clearing DMA error, rc=%d\n", rc);
-			return IRQ_HANDLED;
-		}
 
-		mutex_lock(&chip->sram_rw_lock);
+	mutex_lock(&chip->sram_rw_lock);
+	rc = fg_clear_dma_errors_if_any(chip);
+	if (rc < 0)
+		pr_err("Error in clearing DMA error, rc=%d\n", rc);
+
+	if (status & MEM_XCP_BIT) {
 		rc = fg_clear_ima_errors_if_any(chip, true);
 		if (rc < 0 && rc != -EAGAIN)
 			pr_err("Error in checking IMA errors rc:%d\n", rc);
-		mutex_unlock(&chip->sram_rw_lock);
 	}
 
+	mutex_unlock(&chip->sram_rw_lock);
 	return IRQ_HANDLED;
 }
 
@@ -3676,6 +3777,8 @@
 #define DEFAULT_ESR_TIGHT_LT_FLT_UPCT	48829
 #define DEFAULT_ESR_BROAD_LT_FLT_UPCT	148438
 #define DEFAULT_ESR_CLAMP_MOHMS		20
+#define DEFAULT_ESR_PULSE_THRESH_MA	110
+#define DEFAULT_ESR_MEAS_CURR_MA	120
 static int fg_parse_dt(struct fg_chip *chip)
 {
 	struct device_node *child, *revid_node, *node = chip->dev->of_node;
@@ -3725,6 +3828,7 @@
 	case PM660_SUBTYPE:
 		chip->sp = pmi8998_v2_sram_params;
 		chip->alg_flags = pmi8998_v2_alg_flags;
+		chip->use_ima_single_mode = true;
 		break;
 	default:
 		return -EINVAL;
@@ -3945,9 +4049,7 @@
 		pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
 
 	rc = of_property_read_u32(node, "qcom,fg-rconn-mohms", &temp);
-	if (rc < 0)
-		chip->dt.rconn_mohms = -EINVAL;
-	else
+	if (!rc)
 		chip->dt.rconn_mohms = temp;
 
 	rc = of_property_read_u32(node, "qcom,fg-esr-filter-switch-temp",
@@ -3995,6 +4097,22 @@
 	else
 		chip->dt.esr_clamp_mohms = temp;
 
+	chip->dt.esr_pulse_thresh_ma = DEFAULT_ESR_PULSE_THRESH_MA;
+	rc = of_property_read_u32(node, "qcom,fg-esr-pulse-thresh-ma", &temp);
+	if (!rc) {
+		/* ESR pulse qualification threshold range is 1-996 mA */
+		if (temp > 0 && temp < 997)
+			chip->dt.esr_pulse_thresh_ma = temp;
+	}
+
+	chip->dt.esr_meas_curr_ma = DEFAULT_ESR_MEAS_CURR_MA;
+	rc = of_property_read_u32(node, "qcom,fg-esr-meas-curr-ma", &temp);
+	if (!rc) {
+		/* ESR measurement current range is 60-240 mA */
+		if (temp >= 60 && temp <= 240)
+			chip->dt.esr_meas_curr_ma = temp;
+	}
+
 	return 0;
 }
 
@@ -4005,6 +4123,9 @@
 	if (chip->awake_votable)
 		destroy_votable(chip->awake_votable);
 
+	if (chip->delta_bsoc_irq_en_votable)
+		destroy_votable(chip->delta_bsoc_irq_en_votable);
+
 	if (chip->batt_id_chan)
 		iio_channel_release(chip->batt_id_chan);
 
@@ -4046,7 +4167,15 @@
 					chip);
 	if (IS_ERR(chip->awake_votable)) {
 		rc = PTR_ERR(chip->awake_votable);
-		return rc;
+		goto exit;
+	}
+
+	chip->delta_bsoc_irq_en_votable = create_votable("FG_DELTA_BSOC_IRQ",
+						VOTE_SET_ANY,
+						fg_delta_bsoc_irq_en_cb, chip);
+	if (IS_ERR(chip->delta_bsoc_irq_en_votable)) {
+		rc = PTR_ERR(chip->delta_bsoc_irq_en_votable);
+		goto exit;
 	}
 
 	rc = fg_parse_dt(chip);
@@ -4073,7 +4202,7 @@
 	rc = fg_get_batt_id(chip);
 	if (rc < 0) {
 		pr_err("Error in getting battery id, rc:%d\n", rc);
-		return rc;
+		goto exit;
 	}
 
 	rc = fg_get_batt_profile(chip);
@@ -4131,11 +4260,7 @@
 		disable_irq_nosync(fg_irqs[SOC_UPDATE_IRQ].irq);
 
 	/* Keep BSOC_DELTA_IRQ irq disabled until we require it */
-	if (fg_irqs[BSOC_DELTA_IRQ].irq) {
-		disable_irq_wake(fg_irqs[BSOC_DELTA_IRQ].irq);
-		disable_irq_nosync(fg_irqs[BSOC_DELTA_IRQ].irq);
-		chip->bsoc_delta_irq_en = false;
-	}
+	rerun_election(chip->delta_bsoc_irq_en_votable);
 
 	rc = fg_debugfs_create(chip);
 	if (rc < 0) {
diff --git a/drivers/power/supply/qcom/qpnp-qnovo.c b/drivers/power/supply/qcom/qpnp-qnovo.c
index cbfab30..c74dc89 100644
--- a/drivers/power/supply/qcom/qpnp-qnovo.c
+++ b/drivers/power/supply/qcom/qpnp-qnovo.c
@@ -19,7 +19,7 @@
 #include <linux/of.h>
 #include <linux/of_irq.h>
 #include <linux/qpnp/qpnp-revid.h>
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define QNOVO_REVISION1		0x00
 #define QNOVO_REVISION2		0x01
@@ -29,6 +29,8 @@
 #define QNOVO_PTRAIN_STS	0x08
 #define QNOVO_ERROR_STS		0x09
 #define QNOVO_ERROR_BIT		BIT(0)
+#define QNOVO_ERROR_STS2	0x0A
+#define QNOVO_ERROR_CHARGING_DISABLED	BIT(1)
 #define QNOVO_INT_RT_STS	0x10
 #define QNOVO_INT_SET_TYPE	0x11
 #define QNOVO_INT_POLARITY_HIGH	0x12
@@ -109,20 +111,6 @@
 	struct device_node	*revid_dev_node;
 };
 
-enum {
-	QNOVO_NO_ERR_STS_BIT		= BIT(0),
-};
-
-struct chg_props {
-	bool		charging;
-	bool		usb_online;
-	bool		dc_online;
-};
-
-struct chg_status {
-	bool		ok_to_qnovo;
-};
-
 struct qnovo {
 	int			base;
 	struct mutex		write_lock;
@@ -141,13 +129,10 @@
 	s64			v_gain_mega;
 	struct notifier_block	nb;
 	struct power_supply	*batt_psy;
-	struct power_supply	*usb_psy;
-	struct power_supply	*dc_psy;
-	struct chg_props	cp;
-	struct chg_status	cs;
 	struct work_struct	status_change_work;
 	int			fv_uV_request;
 	int			fcc_uA_request;
+	bool			ok_to_qnovo;
 };
 
 static int debug_mask;
@@ -272,28 +257,22 @@
 					const char *client)
 {
 	struct qnovo *chip = data;
-	int rc = 0;
+	union power_supply_propval pval = {0};
+	int rc;
 
-	if (disable) {
-		rc = qnovo_batt_psy_update(chip, true);
-		if (rc < 0)
-			return rc;
-	}
+	if (!is_batt_available(chip))
+		return -EINVAL;
 
-	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
-				 disable ? 0 : QNOVO_PTRAIN_EN_BIT);
+	pval.intval = !disable;
+	rc = power_supply_set_property(chip->batt_psy,
+			POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
+			&pval);
 	if (rc < 0) {
-		dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
-			disable ? "disable" : "enable", rc);
-		return rc;
+		pr_err("Couldn't set prop qnovo_enable rc = %d\n", rc);
+		return -EINVAL;
 	}
 
-	if (!disable) {
-		rc = qnovo_batt_psy_update(chip, false);
-		if (rc < 0)
-			return rc;
-	}
-
+	rc = qnovo_batt_psy_update(chip, disable);
 	return rc;
 }
 
@@ -325,36 +304,18 @@
 	return 0;
 }
 
-static int qnovo_check_chg_version(struct qnovo *chip)
-{
-	int rc;
-
-	chip->pmic_rev_id = get_revid_data(chip->dt.revid_dev_node);
-	if (IS_ERR(chip->pmic_rev_id)) {
-		rc = PTR_ERR(chip->pmic_rev_id);
-		if (rc != -EPROBE_DEFER)
-			pr_err("Unable to get pmic_revid rc=%d\n", rc);
-		return rc;
-	}
-
-	if ((chip->pmic_rev_id->pmic_subtype == PMI8998_SUBTYPE)
-		   && (chip->pmic_rev_id->rev4 < PMI8998_V2P0_REV4)) {
-		chip->wa_flags |= QNOVO_NO_ERR_STS_BIT;
-	}
-
-	return 0;
-}
-
 enum {
 	VER = 0,
 	OK_TO_QNOVO,
-	ENABLE,
+	QNOVO_ENABLE,
+	PT_ENABLE,
 	FV_REQUEST,
 	FCC_REQUEST,
 	PE_CTRL_REG,
 	PE_CTRL2_REG,
 	PTRAIN_STS_REG,
 	INT_RT_STS_REG,
+	ERR_STS2_REG,
 	PREST1,
 	PPULS1,
 	NREST1,
@@ -394,6 +355,12 @@
 };
 
 static struct param_info params[] = {
+	[PT_ENABLE] = {
+		.name			= "PT_ENABLE",
+		.start_addr		= QNOVO_PTRAIN_EN,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
 	[FV_REQUEST] = {
 		.units_str		= "uV",
 	},
@@ -424,6 +391,12 @@
 		.num_regs		= 1,
 		.units_str		= "",
 	},
+	[ERR_STS2_REG] = {
+		.name			= "RAW_CHGR_ERR",
+		.start_addr		= QNOVO_ERROR_STS2,
+		.num_regs		= 1,
+		.units_str		= "",
+	},
 	[PREST1] = {
 		.name			= "PREST1",
 		.start_addr		= QNOVO_PREST1_CTRL,
@@ -431,7 +404,7 @@
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
 		.min_val		= 5,
-		.max_val		= 1275,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[PPULS1] = {
@@ -440,8 +413,8 @@
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 1600, /* converts to uC */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 104856000,
+		.min_val		= 30000,
+		.max_val		= 65535000,
 		.units_str		= "uC",
 	},
 	[NREST1] = {
@@ -451,7 +424,7 @@
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
 		.min_val		= 5,
-		.max_val		= 1275,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[NPULS1] = {
@@ -460,8 +433,8 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 5,
-		.max_val		= 1275,
+		.min_val		= 0,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[PPCNT] = {
@@ -470,7 +443,7 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 1,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
+		.min_val		= 1,
 		.max_val		= 255,
 		.units_str		= "pulses",
 	},
@@ -480,8 +453,8 @@
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 610350, /* converts to nV */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 5000000,
+		.min_val		= 2200000,
+		.max_val		= 4500000,
 		.units_str		= "uV",
 	},
 	[PVOLT1] = {
@@ -506,8 +479,6 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 2,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 5,
-		.max_val		= 1275,
 		.units_str		= "S",
 	},
 	[PREST2] = {
@@ -517,7 +488,7 @@
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
 		.min_val		= 5,
-		.max_val		= 327675,
+		.max_val		= 65535,
 		.units_str		= "mS",
 	},
 	[PPULS2] = {
@@ -526,8 +497,8 @@
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 1600, /* converts to uC */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 104856000,
+		.min_val		= 30000,
+		.max_val		= 65535000,
 		.units_str		= "uC",
 	},
 	[NREST2] = {
@@ -538,7 +509,7 @@
 		.reg_to_unit_divider	= 1,
 		.reg_to_unit_offset	= -5,
 		.min_val		= 5,
-		.max_val		= 1280,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[NPULS2] = {
@@ -547,18 +518,18 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 5,
 		.reg_to_unit_divider	= 1,
-		.min_val		= 5,
-		.max_val		= 1275,
+		.min_val		= 0,
+		.max_val		= 255,
 		.units_str		= "mS",
 	},
 	[VLIM2] = {
-		.name			= "VLIM1",
+		.name			= "VLIM2",
 		.start_addr		= QNOVO_VLIM2_LSB_CTRL,
 		.num_regs		= 2,
 		.reg_to_unit_multiplier	= 610350, /* converts to nV */
 		.reg_to_unit_divider	= 1,
-		.min_val		= 0,
-		.max_val		= 5000000,
+		.min_val		= 2200000,
+		.max_val		= 4500000,
 		.units_str		= "uV",
 	},
 	[PVOLT2] = {
@@ -591,6 +562,8 @@
 		.num_regs		= 1,
 		.reg_to_unit_multiplier	= 1,
 		.reg_to_unit_divider	= 1,
+		.min_val		= 0,
+		.max_val		= 255,
 		.units_str		= "pulses",
 	},
 	[VMAX] = {
@@ -645,33 +618,73 @@
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
 
-	return snprintf(buf, PAGE_SIZE, "%d\n", chip->cs.ok_to_qnovo);
+	return snprintf(buf, PAGE_SIZE, "%d\n", chip->ok_to_qnovo);
 }
 
-static ssize_t enable_show(struct class *c, struct class_attribute *attr,
+static ssize_t qnovo_enable_show(struct class *c, struct class_attribute *attr,
 			char *ubuf)
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
-	int val;
+	int val = get_effective_result(chip->disable_votable);
 
-	val = get_client_vote(chip->disable_votable, USER_VOTER);
-	val = !val;
-	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", !val);
 }
 
-static ssize_t enable_store(struct class *c, struct class_attribute *attr,
+static ssize_t qnovo_enable_store(struct class *c, struct class_attribute *attr,
 			const char *ubuf, size_t count)
 {
 	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
 	unsigned long val;
-	bool disable;
 
-	if (kstrtoul(ubuf, 10, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
-	disable = !val;
+	vote(chip->disable_votable, USER_VOTER, !val, 0);
 
-	vote(chip->disable_votable, USER_VOTER, disable, 0);
+	return count;
+}
+
+static ssize_t pt_enable_show(struct class *c, struct class_attribute *attr,
+			char *ubuf)
+{
+	int i = attr - qnovo_attributes;
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	u8 buf[2] = {0, 0};
+	u16 regval;
+	int rc;
+
+	rc = qnovo_read(chip, params[i].start_addr, buf, params[i].num_regs);
+	if (rc < 0) {
+		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
+		return -EINVAL;
+	}
+	regval = buf[1] << 8 | buf[0];
+
+	return snprintf(ubuf, PAGE_SIZE, "%d\n",
+				(int)(regval & QNOVO_PTRAIN_EN_BIT));
+}
+
+static ssize_t pt_enable_store(struct class *c, struct class_attribute *attr,
+			const char *ubuf, size_t count)
+{
+	struct qnovo *chip = container_of(c, struct qnovo, qnovo_class);
+	unsigned long val;
+	int rc = 0;
+
+	if (get_effective_result(chip->disable_votable))
+		return -EINVAL;
+
+	if (kstrtoul(ubuf, 0, &val))
+		return -EINVAL;
+
+	rc = qnovo_masked_write(chip, QNOVO_PTRAIN_EN, QNOVO_PTRAIN_EN_BIT,
+				 (bool)val ? QNOVO_PTRAIN_EN_BIT : 0);
+	if (rc < 0) {
+		dev_err(chip->dev, "Couldn't %s pulse train rc=%d\n",
+			(bool)val ? "enable" : "disable", rc);
+		return rc;
+	}
+
 	return count;
 }
 
@@ -688,7 +701,7 @@
 	if (i == FCC_REQUEST)
 		val = chip->fcc_uA_request;
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
 }
 
 static ssize_t val_store(struct class *c, struct class_attribute *attr,
@@ -698,7 +711,7 @@
 	int i = attr - qnovo_attributes;
 	unsigned long val;
 
-	if (kstrtoul(ubuf, 10, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
 	if (i == FV_REQUEST)
@@ -707,6 +720,9 @@
 	if (i == FCC_REQUEST)
 		chip->fcc_uA_request = val;
 
+	if (!get_effective_result(chip->disable_votable))
+		qnovo_batt_psy_update(chip, false);
+
 	return count;
 }
 
@@ -726,8 +742,7 @@
 	}
 	regval = buf[1] << 8 | buf[0];
 
-	return snprintf(ubuf, PAGE_SIZE, "0x%04x%s\n",
-			regval, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "0x%04x\n", regval);
 }
 
 static ssize_t reg_store(struct class *c, struct class_attribute *attr,
@@ -739,7 +754,7 @@
 	unsigned long val;
 	int rc;
 
-	if (kstrtoul(ubuf, 16, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
 	buf[0] = val & 0xFF;
@@ -774,7 +789,7 @@
 			/ params[i].reg_to_unit_divider)
 		- params[i].reg_to_unit_offset;
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n", val, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", val);
 }
 
 static ssize_t time_store(struct class *c, struct class_attribute *attr,
@@ -787,7 +802,7 @@
 	unsigned long val;
 	int rc;
 
-	if (kstrtoul(ubuf, 10, &val))
+	if (kstrtoul(ubuf, 0, &val))
 		return -EINVAL;
 
 	if (val < params[i].min_val || val > params[i].max_val) {
@@ -828,7 +843,11 @@
 		pr_err("Couldn't read %s rc = %d\n", params[i].name, rc);
 		return -EINVAL;
 	}
-	regval_nA = buf[1] << 8 | buf[0];
+
+	if (buf[1] & BIT(5))
+		buf[1] |= GENMASK(7, 6);
+
+	regval_nA = (s16)(buf[1] << 8 | buf[0]);
 	regval_nA = div_s64(regval_nA * params[i].reg_to_unit_multiplier,
 					params[i].reg_to_unit_divider)
 			- params[i].reg_to_unit_offset;
@@ -841,11 +860,10 @@
 		gain = chip->internal_i_gain_mega;
 	}
 
-	comp_val_nA = div_s64(regval_nA * gain, 1000000) + offset_nA;
+	comp_val_nA = div_s64(regval_nA * gain, 1000000) - offset_nA;
 	comp_val_uA = div_s64(comp_val_nA, 1000);
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-			comp_val_uA, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uA);
 }
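[Editor's note: a standalone model of the sign extension above. The hardware reports a 14-bit two's-complement current, so bit 13 (BIT(5) of the high byte) must be propagated into bits 15:14 before the s16 cast:]

#include <stdint.h>
#include <stdio.h>

static int16_t sext14(uint8_t lo, uint8_t hi)
{
	if (hi & (1u << 5))
		hi |= 0xC0;	/* GENMASK(7, 6) */
	return (int16_t)((hi << 8) | lo);
}

int main(void)
{
	/* 0x3FFF is -1 in 14-bit two's complement. */
	printf("%d\n", sext14(0xFF, 0x3F));	/* prints -1 */
	return 0;
}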
 
 static ssize_t voltage_show(struct class *c, struct class_attribute *attr,
@@ -875,8 +893,7 @@
 	comp_val_nV = div_s64(regval_nV * gain, 1000000) + offset_nV;
 	comp_val_uV = div_s64(comp_val_nV, 1000);
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-				comp_val_uV, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uV);
 }
 
 static ssize_t voltage_store(struct class *c, struct class_attribute *attr,
@@ -890,7 +907,7 @@
 	s64 regval_nV;
 	s64 gain, offset_nV;
 
-	if (kstrtoul(ubuf, 10, &val_uV))
+	if (kstrtoul(ubuf, 0, &val_uV))
 		return -EINVAL;
 
 	if (val_uV < params[i].min_val || val_uV > params[i].max_val) {
@@ -947,8 +964,7 @@
 		gain = chip->internal_i_gain_mega;
 
 	comp_val_uC = div_s64(regval_uC * gain, 1000000);
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-			comp_val_uC, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", comp_val_uC);
 }
 
 static ssize_t coulomb_store(struct class *c, struct class_attribute *attr,
@@ -962,7 +978,7 @@
 	s64 regval;
 	s64 gain;
 
-	if (kstrtoul(ubuf, 10, &val_uC))
+	if (kstrtoul(ubuf, 0, &val_uC))
 		return -EINVAL;
 
 	if (val_uC < params[i].min_val || val_uC > params[i].max_val) {
@@ -1014,15 +1030,14 @@
 		return -EINVAL;
 	}
 
-	return snprintf(ubuf, PAGE_SIZE, "%d%s\n",
-			pval.intval, params[i].units_str);
+	return snprintf(ubuf, PAGE_SIZE, "%d\n", pval.intval);
 }
 
 static struct class_attribute qnovo_attributes[] = {
 	[VER]			= __ATTR_RO(version),
 	[OK_TO_QNOVO]		= __ATTR_RO(ok_to_qnovo),
-	[ENABLE]		= __ATTR(enable, 0644,
-					enable_show, enable_store),
+	[QNOVO_ENABLE]		= __ATTR_RW(qnovo_enable),
+	[PT_ENABLE]		= __ATTR_RW(pt_enable),
 	[FV_REQUEST]		= __ATTR(fv_uV_request, 0644,
 					val_show, val_store),
 	[FCC_REQUEST]		= __ATTR(fcc_uA_request, 0644,
@@ -1031,10 +1046,12 @@
 					reg_show, reg_store),
 	[PE_CTRL2_REG]		= __ATTR(PE_CTRL2_REG, 0644,
 					reg_show, reg_store),
-	[PTRAIN_STS_REG]	= __ATTR(PTRAIN_STS_REG, 0644,
-					reg_show, reg_store),
-	[INT_RT_STS_REG]	= __ATTR(INT_RT_STS_REG, 0644,
-					reg_show, reg_store),
+	[PTRAIN_STS_REG]	= __ATTR(PTRAIN_STS_REG, 0444,
+					reg_show, NULL),
+	[INT_RT_STS_REG]	= __ATTR(INT_RT_STS_REG, 0444,
+					reg_show, NULL),
+	[ERR_STS2_REG]		= __ATTR(ERR_STS2_REG, 0444,
+					reg_show, NULL),
 	[PREST1]		= __ATTR(PREST1_mS, 0644,
 					time_show, time_store),
 	[PPULS1]		= __ATTR(PPULS1_uC, 0644,
@@ -1055,7 +1072,7 @@
 					time_show, NULL),
 	[PREST2]		= __ATTR(PREST2_mS, 0644,
 					time_show, time_store),
-	[PPULS2]		= __ATTR(PPULS2_mS, 0644,
+	[PPULS2]		= __ATTR(PPULS2_uC, 0644,
 					coulomb_show, coulomb_store),
 	[NREST2]		= __ATTR(NREST2_mS, 0644,
 					time_show, time_store),
@@ -1073,8 +1090,8 @@
 					time_show, time_store),
 	[VMAX]			= __ATTR(VMAX_uV, 0444,
 					voltage_show, NULL),
-	[SNUM]			= __ATTR(SNUM, 0644,
-					time_show, time_store),
+	[SNUM]			= __ATTR(SNUM, 0444,
+					time_show, NULL),
 	[VBATT]			= __ATTR(VBATT_uV, 0444,
 					batt_prop_show, NULL),
 	[IBATT]			= __ATTR(IBATT_uA, 0444,
@@ -1086,95 +1103,40 @@
 	__ATTR_NULL,
 };
 
-static void get_chg_props(struct qnovo *chip, struct chg_props *cp)
+static int qnovo_update_status(struct qnovo *chip)
 {
-	union power_supply_propval pval;
 	u8 val = 0;
 	int rc;
+	bool charging;
+	bool changed = false;
 
-	cp->charging = true;
-	rc = qnovo_read(chip, QNOVO_ERROR_STS, &val, 1);
+	rc = qnovo_read(chip, QNOVO_ERROR_STS2, &val, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read error sts rc = %d\n", rc);
-		cp->charging = false;
+		charging = false;
 	} else {
-		cp->charging = (!(val & QNOVO_ERROR_BIT));
+		charging = !(val & QNOVO_ERROR_CHARGING_DISABLED);
 	}
 
-	if (chip->wa_flags & QNOVO_NO_ERR_STS_BIT) {
-		/*
-		 * on v1.0 and v1.1 pmic's force charging to true
-		 * if things are not good to charge s/w gets a PTRAIN_DONE
-		 * interrupt
-		 */
-		cp->charging = true;
+	if (chip->ok_to_qnovo ^ charging) {
+
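+		/* when charging stops, require an explicit user re-enable */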
+		vote(chip->disable_votable, OK_TO_QNOVO_VOTER, !charging, 0);
+		if (!charging)
+			vote(chip->disable_votable, USER_VOTER, true, 0);
+
+		chip->ok_to_qnovo = charging;
+		changed = true;
 	}
 
-	cp->usb_online = false;
-	if (!chip->usb_psy)
-		chip->usb_psy = power_supply_get_by_name("usb");
-	if (chip->usb_psy) {
-		rc = power_supply_get_property(chip->usb_psy,
-				POWER_SUPPLY_PROP_ONLINE, &pval);
-		if (rc < 0)
-			pr_err("Couldn't read usb online rc = %d\n", rc);
-		else
-			cp->usb_online = (bool)pval.intval;
-	}
-
-	cp->dc_online = false;
-	if (!chip->dc_psy)
-		chip->dc_psy = power_supply_get_by_name("dc");
-	if (chip->dc_psy) {
-		rc = power_supply_get_property(chip->dc_psy,
-				POWER_SUPPLY_PROP_ONLINE, &pval);
-		if (rc < 0)
-			pr_err("Couldn't read dc online rc = %d\n", rc);
-		else
-			cp->dc_online = (bool)pval.intval;
-	}
-}
-
-static void get_chg_status(struct qnovo *chip, const struct chg_props *cp,
-				struct chg_status *cs)
-{
-	cs->ok_to_qnovo = false;
-
-	if (cp->charging &&
-		(cp->usb_online || cp->dc_online))
-		cs->ok_to_qnovo = true;
+	return changed;
 }
 
 static void status_change_work(struct work_struct *work)
 {
 	struct qnovo *chip = container_of(work,
 			struct qnovo, status_change_work);
-	bool notify_uevent = false;
-	struct chg_props cp;
-	struct chg_status cs;
 
-	get_chg_props(chip, &cp);
-	get_chg_status(chip, &cp, &cs);
-
-	if (cs.ok_to_qnovo ^ chip->cs.ok_to_qnovo) {
-		/*
-		 * when it is not okay to Qnovo charge, disable both voters,
-		 * so that when it becomes okay to Qnovo charge the user voter
-		 * has to specifically enable its vote to being Qnovo charging
-		 */
-		if (!cs.ok_to_qnovo) {
-			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 1, 0);
-			vote(chip->disable_votable, USER_VOTER, 1, 0);
-		} else {
-			vote(chip->disable_votable, OK_TO_QNOVO_VOTER, 0, 0);
-		}
-		notify_uevent = true;
-	}
-
-	memcpy(&chip->cp, &cp, sizeof(struct chg_props));
-	memcpy(&chip->cs, &cs, sizeof(struct chg_status));
-
-	if (notify_uevent)
+	if (qnovo_update_status(chip))
 		kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
 }
 
@@ -1186,8 +1148,8 @@
 
 	if (ev != PSY_EVENT_PROP_CHANGED)
 		return NOTIFY_OK;
-	if ((strcmp(psy->desc->name, "battery") == 0)
-		|| (strcmp(psy->desc->name, "usb") == 0))
+
+	if (strcmp(psy->desc->name, "battery") == 0)
 		schedule_work(&chip->status_change_work);
 
 	return NOTIFY_OK;
@@ -1197,8 +1159,7 @@
 {
 	struct qnovo *chip = data;
 
-	/* disable user voter here */
-	vote(chip->disable_votable, USER_VOTER, 0, 0);
+	qnovo_update_status(chip);
 	kobject_uevent(&chip->dev->kobj, KOBJ_CHANGE);
 	return IRQ_HANDLED;
 }
@@ -1211,7 +1172,14 @@
 	u8 vadc_offset, vadc_gain;
 	u8 val;
 
-	vote(chip->disable_votable, USER_VOTER, 1, 0);
+	vote(chip->disable_votable, USER_VOTER, true, 0);
+
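+	/* clear the IADC bitstream control before programming trims */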
+	val = 0;
+	rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc bitstream control rc = %d\n", rc);
+		return rc;
+	}
 
 	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_0, &iadc_offset_external, 1);
 	if (rc < 0) {
@@ -1219,12 +1187,28 @@
 		return rc;
 	}
 
+	/* stored as an 8 bit 2's complement signed integer */
+	val = -1 * iadc_offset_external;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
 	rc = qnovo_read(chip, QNOVO_IADC_OFFSET_1, &iadc_offset_internal, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read iadc internal offset rc = %d\n", rc);
 		return rc;
 	}
 
+	/* stored as an 8 bit 2's complement signed integer */
+	val = -1 * iadc_offset_internal;
+	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
+	if (rc < 0) {
+		pr_err("Couldn't write iadc offset rc = %d\n", rc);
+		return rc;
+	}
+
 	rc = qnovo_read(chip, QNOVO_IADC_GAIN_0, &iadc_gain_external, 1);
 	if (rc < 0) {
 		pr_err("Couldn't read iadc external gain rc = %d\n", rc);
@@ -1249,53 +1233,20 @@
 		return rc;
 	}
 
-	chip->external_offset_nA = (s64)iadc_offset_external * IADC_LSB_NA;
-	chip->internal_offset_nA = (s64)iadc_offset_internal * IADC_LSB_NA;
-	chip->offset_nV = (s64)vadc_offset * VADC_LSB_NA;
+	chip->external_offset_nA = (s64)(s8)iadc_offset_external * IADC_LSB_NA;
+	chip->internal_offset_nA = (s64)(s8)iadc_offset_internal * IADC_LSB_NA;
+	chip->offset_nV = (s64)(s8)vadc_offset * VADC_LSB_NA;
 	chip->external_i_gain_mega
-		= 1000000000 + (s64)iadc_gain_external * GAIN_LSB_FACTOR;
+		= 1000000000 + (s64)(s8)iadc_gain_external * GAIN_LSB_FACTOR;
 	chip->external_i_gain_mega
 		= div_s64(chip->external_i_gain_mega, 1000);
 	chip->internal_i_gain_mega
-		= 1000000000 + (s64)iadc_gain_internal * GAIN_LSB_FACTOR;
+		= 1000000000 + (s64)(s8)iadc_gain_internal * GAIN_LSB_FACTOR;
 	chip->internal_i_gain_mega
 		= div_s64(chip->internal_i_gain_mega, 1000);
-	chip->v_gain_mega = 1000000000 + (s64)vadc_gain * GAIN_LSB_FACTOR;
+	chip->v_gain_mega = 1000000000 + (s64)(s8)vadc_gain * GAIN_LSB_FACTOR;
 	chip->v_gain_mega = div_s64(chip->v_gain_mega, 1000);
 
-	val = 0;
-	rc = qnovo_write(chip, QNOVO_STRM_CTRL, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't write iadc bitsteam control rc = %d\n", rc);
-		return rc;
-	}
-
-	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't read iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
-	val *= -1;
-	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_0, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't write iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
-	rc = qnovo_read(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't read iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
-	val *= -1;
-	rc = qnovo_write(chip, QNOVO_TR_IADC_OFFSET_1, &val, 1);
-	if (rc < 0) {
-		pr_err("Couldn't write iadc offset rc = %d\n", rc);
-		return rc;
-	}
-
 	return 0;
 }
 
@@ -1333,6 +1284,9 @@
 					irq_ptrain_done, rc);
 		return rc;
 	}
+
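+	/* allow the PTRAIN_DONE interrupt to wake the system */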
+	enable_irq_wake(irq_ptrain_done);
+
 	return rc;
 }
 
@@ -1362,13 +1316,6 @@
 		return rc;
 	}
 
-	rc = qnovo_check_chg_version(chip);
-	if (rc < 0) {
-		if (rc != -EPROBE_DEFER)
-			pr_err("Couldn't check version rc=%d\n", rc);
-		return rc;
-	}
-
 	/* set driver data before resources request it */
 	platform_set_drvdata(pdev, chip);
 
@@ -1414,6 +1361,8 @@
 		goto unreg_notifier;
 	}
 
+	device_init_wakeup(chip->dev, true);
+
 	return rc;
 
 unreg_notifier:
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index dab7888..8fd45f18 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -11,6 +11,7 @@
  */
 
 #include <linux/debugfs.h>
+#include <linux/delay.h>
 #include <linux/device.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
@@ -25,7 +26,7 @@
 #include "smb-reg.h"
 #include "smb-lib.h"
 #include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define SMB2_DEFAULT_WPWR_UW	8000000
 
@@ -239,7 +240,6 @@
 struct smb_dt_props {
 	int	fcc_ua;
 	int	usb_icl_ua;
-	int	otg_cl_ua;
 	int	dc_icl_ua;
 	int	boost_threshold_ua;
 	int	fv_uv;
@@ -323,9 +323,9 @@
 		chip->dt.usb_icl_ua = -EINVAL;
 
 	rc = of_property_read_u32(node,
-				"qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+				"qcom,otg-cl-ua", &chg->otg_cl_ua);
 	if (rc < 0)
-		chip->dt.otg_cl_ua = MICRO_1P5A;
+		chg->otg_cl_ua = MICRO_1P5A;
 
 	rc = of_property_read_u32(node,
 				"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
@@ -414,6 +414,7 @@
 	POWER_SUPPLY_PROP_BOOST_CURRENT,
 	POWER_SUPPLY_PROP_PE_START,
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 };
 
 static int smb2_usb_get_prop(struct power_supply *psy,
@@ -502,6 +503,9 @@
 	case POWER_SUPPLY_PROP_CTM_CURRENT_MAX:
 		val->intval = get_client_vote(chg->usb_icl_votable, CTM_VOTER);
 		break;
+	case POWER_SUPPLY_PROP_HW_CURRENT_MAX:
+		rc = smblib_get_charge_current(chg, &val->intval);
+		break;
 	default:
 		pr_err("get prop %d is not supported in usb\n", psp);
 		rc = -EINVAL;
@@ -610,12 +614,12 @@
 
 static enum power_supply_property smb2_usb_main_props[] = {
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
-	POWER_SUPPLY_PROP_ICL_REDUCTION,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_TYPE,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
 	POWER_SUPPLY_PROP_INPUT_VOLTAGE_SETTLED,
 	POWER_SUPPLY_PROP_FCC_DELTA,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
 	/*
 	 * TODO move the TEMP and TEMP_MAX properties here,
 	 * and update the thermal balancer to look here
@@ -634,9 +638,6 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
 		break;
-	case POWER_SUPPLY_PROP_ICL_REDUCTION:
-		val->intval = chg->icl_reduction_ua;
-		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		rc = smblib_get_charge_param(chg, &chg->param.fcc,
 							&val->intval);
@@ -653,6 +654,9 @@
 	case POWER_SUPPLY_PROP_FCC_DELTA:
 		rc = smblib_get_prop_fcc_delta(chg, val);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		val->intval = get_effective_result(chg->usb_icl_votable);
+		break;
 	default:
 		pr_debug("get prop %d is not supported in usb-main\n", psp);
 		rc = -EINVAL;
@@ -677,12 +681,12 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
 		break;
-	case POWER_SUPPLY_PROP_ICL_REDUCTION:
-		rc = smblib_set_icl_reduction(chg, val->intval);
-		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		rc = smblib_set_icl_current(chg, val->intval);
+		break;
 	default:
 		pr_err("set prop %d is not supported\n", psp);
 		rc = -EINVAL;
@@ -838,7 +842,9 @@
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
 	POWER_SUPPLY_PROP_VOLTAGE_NOW,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
+	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
 	POWER_SUPPLY_PROP_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CURRENT_QNOVO,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_TEMP,
 	POWER_SUPPLY_PROP_TECHNOLOGY,
@@ -858,6 +864,7 @@
 {
 	struct smb_charger *chg = power_supply_get_drvdata(psy);
 	int rc = 0;
+	union power_supply_propval pval = {0, };
 
 	switch (psp) {
 	case POWER_SUPPLY_PROP_STATUS:
@@ -882,7 +889,14 @@
 		rc = smblib_get_prop_system_temp_level(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP:
-		rc = smblib_get_prop_charger_temp(chg, val);
+		/* do not query RRADC if charger is not present */
+		rc = smblib_get_prop_usb_present(chg, &pval);
+		if (rc < 0)
+			pr_err("Couldn't get usb present rc=%d\n", rc);
+
+		rc = -ENODATA;
+		if (pval.intval)
+			rc = smblib_get_prop_charger_temp(chg, val);
 		break;
 	case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
 		rc = smblib_get_prop_charger_temp_max(chg, val);
@@ -902,6 +916,9 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		val->intval = get_client_vote(chg->fv_votable, DEFAULT_VOTER);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+		rc = smblib_get_prop_charge_qnovo_enable(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
 		val->intval = chg->qnovo_fv_uv;
 		break;
@@ -977,12 +994,17 @@
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		vote(chg->fv_votable, DEFAULT_VOTER, true, val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE:
+		rc = smblib_set_prop_charge_qnovo_enable(chg, val);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_QNOVO:
 		chg->qnovo_fv_uv = val->intval;
 		rc = rerun_election(chg->fv_votable);
 		break;
 	case POWER_SUPPLY_PROP_CURRENT_QNOVO:
 		chg->qnovo_fcc_ua = val->intval;
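+		/* disable parallel charging while Qnovo requests less than 2 A */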
+		vote(chg->pl_disable_votable, PL_QNOVO_VOTER,
+			val->intval != -EINVAL && val->intval < 2000000, 0);
 		rc = rerun_election(chg->fcc_votable);
 		break;
 	case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
@@ -1115,6 +1137,9 @@
 	struct regulator_config cfg = {};
 	int rc = 0;
 
+	if (chg->micro_usb_mode)
+		return 0;
+
 	chg->vconn_vreg = devm_kzalloc(chg->dev, sizeof(*chg->vconn_vreg),
 				      GFP_KERNEL);
 	if (!chg->vconn_vreg)
@@ -1325,6 +1350,39 @@
 {
 	int rc;
 
+	/* Move to typeC mode */
+	/* configure FSM in idle state and disable UFP_ENABLE bit */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT | UFP_EN_CMD_BIT,
+			TYPEC_DISABLE_CMD_BIT);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't put FSM in idle rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for FSM to enter idle state */
+	msleep(200);
+	/* configure TypeC mode */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+			TYPE_C_OR_U_USB_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable micro USB mode rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for mode change before enabling FSM */
+	usleep_range(10000, 11000);
+	/* release FSM from idle state */
+	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
+			TYPEC_DISABLE_CMD_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't release FSM rc=%d\n", rc);
+		return rc;
+	}
+
+	/* wait for FSM to start */
+	msleep(100);
+	/* move to uUSB mode */
 	/* configure FSM in idle state */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 			TYPEC_DISABLE_CMD_BIT, TYPEC_DISABLE_CMD_BIT);
@@ -1333,6 +1391,8 @@
 		return rc;
 	}
 
+	/* wait for FSM to enter idle state */
+	msleep(200);
 	/* configure micro USB mode */
 	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
 			TYPE_C_OR_U_USB_BIT, TYPE_C_OR_U_USB_BIT);
@@ -1341,6 +1401,8 @@
 		return rc;
 	}
 
+	/* wait for mode change before enabling FSM */
+	usleep_range(10000, 11000);
 	/* release FSM from idle state */
 	rc = smblib_masked_write(chg, TYPE_C_INTRPT_ENB_SOFTWARE_CTRL_REG,
 			TYPEC_DISABLE_CMD_BIT, 0);
@@ -1386,7 +1448,8 @@
 
 	/* set OTG current limit */
 	rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
-							chip->dt.otg_cl_ua);
+				(chg->wa_flags & OTG_WA) ?
+				chg->param.otg_cl.min_u : chg->otg_cl_ua);
 	if (rc < 0) {
 		pr_err("Couldn't set otg current limit rc=%d\n", rc);
 		return rc;
@@ -1420,10 +1483,10 @@
 		DEFAULT_VOTER, true, chip->dt.fv_uv);
 	vote(chg->dc_icl_votable,
 		DEFAULT_VOTER, true, chip->dt.dc_icl_ua);
-	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
-		chip->dt.hvdcp_disable, 0);
 	vote(chg->hvdcp_disable_votable_indirect, PD_INACTIVE_VOTER,
 			true, 0);
+	vote(chg->hvdcp_disable_votable_indirect, DEFAULT_VOTER,
+		chip->dt.hvdcp_disable, 0);
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER,
 			true, 0);
 	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
@@ -1489,13 +1552,6 @@
 		return rc;
 	}
 
-	rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
-			QNOVO_PT_ENABLE_CMD_BIT, QNOVO_PT_ENABLE_CMD_BIT);
-	if (rc < 0) {
-		dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
-		return rc;
-	}
-
 	/* configure step charging */
 	rc = smb2_config_step_charging(chip);
 	if (rc < 0) {
@@ -1520,6 +1576,16 @@
 		return rc;
 	}
 
+	/* disable h/w autonomous parallel charging control */
+	rc = smblib_masked_write(chg, MISC_CFG_REG,
+				 STAT_PARALLEL_1400MA_EN_CFG_BIT, 0);
+	if (rc < 0) {
+		dev_err(chg->dev,
+			"Couldn't disable h/w autonomous parallel control rc=%d\n",
+			rc);
+		return rc;
+	}
+
 	/* configure float charger options */
 	switch (chip->dt.float_option) {
 	case 1:
@@ -1608,6 +1674,15 @@
 	return rc;
 }
 
+static int smb2_post_init(struct smb2 *chip)
+{
+	struct smb_charger *chg = &chip->chg;
+
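+	/* apply any USB IRQ enable votes cast before the IRQs were requested */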
+	rerun_election(chg->usb_irq_enable_votable);
+
+	return 0;
+}
+
 static int smb2_chg_config_init(struct smb2 *chip)
 {
 	struct smb_charger *chg = &chip->chg;
@@ -1649,7 +1724,7 @@
 		break;
 	case PM660_SUBTYPE:
 		chip->chg.smb_version = PM660_SUBTYPE;
-		chip->chg.wa_flags |= BOOST_BACK_WA;
+		chip->chg.wa_flags |= BOOST_BACK_WA | OTG_WA;
 		chg->param.freq_buck = pm660_params.freq_buck;
 		chg->param.freq_boost = pm660_params.freq_boost;
 		chg->chg_freq.freq_5V		= 600;
@@ -2074,7 +2149,7 @@
 	rc = smb2_init_vconn_regulator(chip);
 	if (rc < 0) {
 		pr_err("Couldn't initialize vconn regulator rc=%d\n",
-			rc);
+				rc);
 		goto cleanup;
 	}
 
@@ -2137,6 +2212,8 @@
 		goto cleanup;
 	}
 
+	smb2_post_init(chip);
+
 	smb2_create_debugfs(chip);
 
 	rc = smblib_get_prop_usb_present(chg, &val);
@@ -2167,6 +2244,8 @@
 	}
 	batt_charge_type = val.intval;
 
+	device_init_wakeup(chg->dev, true);
+
 	pr_info("QPNP SMB2 probed successfully usb:present=%d type=%d batt:present = %d health = %d charge = %d\n",
 		usb_present, chg->usb_psy_desc.type,
 		batt_present, batt_health, batt_charge_type);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 6b4c214..c8deedd 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -21,7 +21,7 @@
 #include "smb-lib.h"
 #include "smb-reg.h"
 #include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define smblib_err(chg, fmt, ...)		\
 	pr_err("%s: %s: " fmt, chg->name,	\
@@ -160,39 +160,14 @@
 int smblib_icl_override(struct smb_charger *chg, bool override)
 {
 	int rc;
-	bool override_status;
-	u8 stat;
-	u16 reg;
 
-	switch (chg->smb_version) {
-	case PMI8998_SUBTYPE:
-		reg = APSD_RESULT_STATUS_REG;
-		break;
-	case PM660_SUBTYPE:
-		reg = AICL_STATUS_REG;
-		break;
-	default:
-		smblib_dbg(chg, PR_MISC, "Unknown chip version=%x\n",
-				chg->smb_version);
-		return -EINVAL;
-	}
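+	/* arm the override to take effect automatically after APSD */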
+	rc = smblib_masked_write(chg, USBIN_LOAD_CFG_REG,
+				ICL_OVERRIDE_AFTER_APSD_BIT,
+				override ? ICL_OVERRIDE_AFTER_APSD_BIT : 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
 
-	rc = smblib_read(chg, reg, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't read reg=%x rc=%d\n", reg, rc);
-		return rc;
-	}
-	override_status = (bool)(stat & ICL_OVERRIDE_LATCH_BIT);
-
-	if (override != override_status) {
-		rc = smblib_masked_write(chg, CMD_APSD_REG,
-				ICL_OVERRIDE_BIT, ICL_OVERRIDE_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't override ICL rc=%d\n", rc);
-			return rc;
-		}
-	}
-	return 0;
+	return rc;
 }
 
 /********************
@@ -547,6 +522,26 @@
  * HELPER FUNCTIONS *
  ********************/
 
+static void smblib_rerun_apsd(struct smb_charger *chg)
+{
+	int rc;
+
+	smblib_dbg(chg, PR_MISC, "re-running APSD\n");
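+	/* re-arm the HVDCP authentication IRQ where the workaround needs it */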
+	if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+		rc = smblib_masked_write(chg,
+				USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+				AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable HVDCP auth IRQ rc=%d\n",
+									rc);
+	}
+
+	rc = smblib_masked_write(chg, CMD_APSD_REG,
+				APSD_RERUN_BIT, APSD_RERUN_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't re-run APSD rc=%d\n", rc);
+}
+
 static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
 {
 	const struct apsd_result *apsd_result;
@@ -564,11 +559,7 @@
 				chg->hvdcp_disable_votable_indirect)) {
 			apsd_result = smblib_get_apsd_result(chg);
 			if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
-				/* rerun APSD */
-				smblib_dbg(chg, PR_MISC, "rerun APSD\n");
-				smblib_masked_write(chg, CMD_APSD_REG,
-						APSD_RERUN_BIT,
-						APSD_RERUN_BIT);
+				smblib_rerun_apsd(chg);
 			}
 		}
 	}
@@ -580,12 +571,13 @@
 	const struct apsd_result *apsd_result = smblib_get_apsd_result(chg);
 
 	/* if PD is active, APSD is disabled so won't have a valid result */
-	if (chg->pd_active) {
+	if (chg->pd_active)
 		chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
-		return apsd_result;
-	}
+	else
+		chg->usb_psy_desc.type = apsd_result->pst;
 
-	chg->usb_psy_desc.type = apsd_result->pst;
+	smblib_dbg(chg, PR_MISC, "APSD=%s PD=%d\n",
+					apsd_result->name, chg->pd_active);
 	return apsd_result;
 }
 
@@ -661,10 +653,13 @@
 {
 	int rc;
 
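+	/* cancel the deferred parallel enable and release its wake vote */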
+	cancel_delayed_work_sync(&chg->pl_enable_work);
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
 
 	cancel_delayed_work_sync(&chg->hvdcp_detect_work);
 
@@ -700,28 +695,6 @@
 	if (rc < 0)
 		smblib_err(chg,
 			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
-
-	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
-	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-	if (rc < 0)
-		smblib_err(chg,
-			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
-			rc);
-}
-
-static bool smblib_sysok_reason_usbin(struct smb_charger *chg)
-{
-	int rc;
-	u8 stat;
-
-	rc = smblib_read(chg, SYSOK_REASON_STATUS_REG, &stat);
-	if (rc < 0) {
-		smblib_err(chg, "Couldn't get SYSOK_REASON_STATUS rc=%d\n", rc);
-		/* assuming 'not usbin' in case of read failure */
-		return false;
-	}
-
-	return stat & SYSOK_REASON_USBIN_BIT;
 }
 
 void smblib_suspend_on_debug_battery(struct smb_charger *chg)
@@ -747,7 +720,6 @@
 
 int smblib_rerun_apsd_if_required(struct smb_charger *chg)
 {
-	const struct apsd_result *apsd_result;
 	union power_supply_propval val;
 	int rc;
 
@@ -760,21 +732,27 @@
 	if (!val.intval)
 		return 0;
 
-	apsd_result = smblib_get_apsd_result(chg);
-	if ((apsd_result->pst == POWER_SUPPLY_TYPE_UNKNOWN)
-		|| (apsd_result->pst == POWER_SUPPLY_TYPE_USB)) {
-		/* rerun APSD */
-		pr_info("Reruning APSD type = %s at bootup\n",
-				apsd_result->name);
-		rc = smblib_masked_write(chg, CMD_APSD_REG,
-					APSD_RERUN_BIT,
-					APSD_RERUN_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't rerun APSD rc = %d\n", rc);
-			return rc;
+	/* fetch the DPDM regulator */
+	if (!chg->dpdm_reg && of_get_property(chg->dev->of_node,
+						"dpdm-supply", NULL)) {
+		chg->dpdm_reg = devm_regulator_get(chg->dev, "dpdm");
+		if (IS_ERR(chg->dpdm_reg)) {
+			smblib_err(chg, "Couldn't get dpdm regulator rc=%ld\n",
+				PTR_ERR(chg->dpdm_reg));
+			chg->dpdm_reg = NULL;
 		}
 	}
 
+	if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
+		smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
+		rc = regulator_enable(chg->dpdm_reg);
+		if (rc < 0)
+			smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
+				rc);
+	}
+
+	smblib_rerun_apsd(chg);
+
 	return 0;
 }
 
@@ -812,29 +790,12 @@
 	return 0;
 }
 
-/*********************
- * VOTABLE CALLBACKS *
- *********************/
-
-static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
-			int suspend, const char *client)
-{
-	struct smb_charger *chg = data;
-
-	/* resume input if suspend is invalid */
-	if (suspend < 0)
-		suspend = 0;
-
-	return smblib_set_dc_suspend(chg, (bool)suspend);
-}
-
 #define USBIN_25MA	25000
 #define USBIN_100MA	100000
 #define USBIN_150MA	150000
 #define USBIN_500MA	500000
 #define USBIN_900MA	900000
 
-
 static int set_sdp_current(struct smb_charger *chg, int icl_ua)
 {
 	int rc;
@@ -873,20 +834,18 @@
 	return rc;
 }
 
-static int smblib_usb_icl_vote_callback(struct votable *votable, void *data,
-			int icl_ua, const char *client)
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua)
 {
-	struct smb_charger *chg = data;
 	int rc = 0;
 	bool override;
 	union power_supply_propval pval;
 
 	/* suspend and return if 25mA or less is requested */
-	if (client && (icl_ua < USBIN_25MA))
+	if (icl_ua < USBIN_25MA)
 		return smblib_set_usb_suspend(chg, true);
 
 	disable_irq_nosync(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
-	if (!client)
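+	/* an ICL of INT_MAX means no voters; fall back to hw defaults */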
+	if (icl_ua == INT_MAX)
 		goto override_suspend_config;
 
 	rc = smblib_get_prop_typec_mode(chg, &pval);
@@ -904,8 +863,7 @@
 			goto enable_icl_changed_interrupt;
 		}
 	} else {
-		rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
-				icl_ua - chg->icl_reduction_ua);
+		rc = smblib_set_charge_param(chg, &chg->param.usb_icl, icl_ua);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't set HC ICL rc=%d\n", rc);
 			goto enable_icl_changed_interrupt;
@@ -915,7 +873,7 @@
 override_suspend_config:
 	/* determine if override needs to be enforced */
 	override = true;
-	if (client == NULL) {
+	if (icl_ua == INT_MAX) {
 		/* remove override if no voters - hw defaults is desired */
 		override = false;
 	} else if (pval.intval == POWER_SUPPLY_TYPEC_SOURCE_DEFAULT) {
@@ -923,7 +881,7 @@
 			/* For std cable with type = SDP never override */
 			override = false;
 		else if (chg->usb_psy_desc.type == POWER_SUPPLY_TYPE_USB_CDP
-			&& icl_ua - chg->icl_reduction_ua == 1500000)
+			&& icl_ua == 1500000)
 			/*
 			 * For std cable with type = CDP override only if
 			 * current is not 1500mA
@@ -953,6 +911,22 @@
 	return rc;
 }
 
+/*********************
+ * VOTABLE CALLBACKS *
+ *********************/
+
+static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
+			int suspend, const char *client)
+{
+	struct smb_charger *chg = data;
+
+	/* resume input if suspend is invalid */
+	if (suspend < 0)
+		suspend = 0;
+
+	return smblib_set_dc_suspend(chg, (bool)suspend);
+}
+
 static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
 			int icl_ua, const char *client)
 {
@@ -1089,16 +1063,6 @@
 	int rc;
 
 	if (apsd_disable) {
-		/* Don't run APSD on CC debounce when APSD is disabled */
-		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
-							APSD_START_ON_CC_BIT,
-							0);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
-									rc);
-			return rc;
-		}
-
 		rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
 							AUTO_SRC_DETECT_BIT,
 							0);
@@ -1114,15 +1078,6 @@
 			smblib_err(chg, "Couldn't enable APSD rc=%d\n", rc);
 			return rc;
 		}
-
-		rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
-							APSD_START_ON_CC_BIT,
-							APSD_START_ON_CC_BIT);
-		if (rc < 0) {
-			smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n",
-									rc);
-			return rc;
-		}
 	}
 
 	return 0;
@@ -1159,6 +1114,26 @@
 	return rc;
 }
 
+static int smblib_usb_irq_enable_vote_callback(struct votable *votable,
+				void *data, int enable, const char *client)
+{
+	struct smb_charger *chg = data;
+
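+	/* gate the input-current-limit and high-duty-cycle IRQs as a pair */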
+	if (!chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq ||
+				!chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+		return 0;
+
+	if (enable) {
+		enable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+		enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+	} else {
+		disable_irq(chg->irq_info[INPUT_CURRENT_LIMIT_IRQ].irq);
+		disable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+	}
+
+	return 0;
+}
+
 /*******************
  * VCONN REGULATOR *
  * *****************/
@@ -1281,11 +1256,14 @@
 /*****************
  * OTG REGULATOR *
  *****************/
-
+#define MAX_RETRY		15
+#define MIN_DELAY_US		2000
+#define MAX_DELAY_US		9000
 static int _smblib_vbus_regulator_enable(struct regulator_dev *rdev)
 {
 	struct smb_charger *chg = rdev_get_drvdata(rdev);
-	int rc;
+	int rc, retry_count = 0, min_delay = MIN_DELAY_US;
+	u8 stat;
 
 	smblib_dbg(chg, PR_OTG, "halt 1 in 8 mode\n");
 	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
@@ -1304,6 +1282,42 @@
 		return rc;
 	}
 
+	if (chg->wa_flags & OTG_WA) {
+		/* check for softstart */
+		do {
+			usleep_range(min_delay, min_delay + 100);
+			rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+			if (rc < 0) {
+				smblib_err(chg,
+					"Couldn't read OTG status rc=%d\n",
+					rc);
+				goto out;
+			}
+
+			if (stat & BOOST_SOFTSTART_DONE_BIT) {
+				rc = smblib_set_charge_param(chg,
+					&chg->param.otg_cl, chg->otg_cl_ua);
+				if (rc < 0)
+					smblib_err(chg,
+						"Couldn't set otg limit\n");
+				break;
+			}
+
+			/* increase the delay for following iterations */
+			if (retry_count > 5)
+				min_delay = MAX_DELAY_US;
+		} while (retry_count++ < MAX_RETRY);
+
+		if (retry_count >= MAX_RETRY) {
+			smblib_dbg(chg, PR_OTG, "Boost Softstart not done\n");
+			goto out;
+		}
+	}
+
+	return 0;
+out:
+	/* disable OTG if softstart failed */
+	smblib_write(chg, CMD_OTG_REG, 0);
 	return rc;
 }
 
@@ -1316,6 +1330,14 @@
 	if (chg->otg_en)
 		goto unlock;
 
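+	/* suspend USB input while the OTG boost is active */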
+	if (!chg->usb_icl_votable) {
+		chg->usb_icl_votable = find_votable("USB_ICL");
+
+		if (!chg->usb_icl_votable)
+			return -EINVAL;
+	}
+	vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, true, 0);
+
 	rc = _smblib_vbus_regulator_enable(rdev);
 	if (rc >= 0)
 		chg->otg_en = true;
@@ -1337,6 +1359,17 @@
 			smblib_err(chg, "Couldn't disable VCONN rc=%d\n", rc);
 	}
 
+	if (chg->wa_flags & OTG_WA) {
+		/* set OTG current limit to minimum value */
+		rc = smblib_set_charge_param(chg, &chg->param.otg_cl,
+						chg->param.otg_cl.min_u);
+		if (rc < 0) {
+			smblib_err(chg,
+				"Couldn't set otg current limit rc=%d\n", rc);
+			return rc;
+		}
+	}
+
 	smblib_dbg(chg, PR_OTG, "disabling OTG\n");
 	rc = smblib_write(chg, CMD_OTG_REG, 0);
 	if (rc < 0) {
@@ -1345,7 +1378,6 @@
 	}
 
 	smblib_dbg(chg, PR_OTG, "start 1 in 8 mode\n");
-	rc = smblib_write(chg, CMD_OTG_REG, 0);
 	rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
 				 ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
 	if (rc < 0) {
@@ -1369,6 +1401,8 @@
 	if (rc >= 0)
 		chg->otg_en = false;
 
+	if (chg->usb_icl_votable)
+		vote(chg->usb_icl_votable, USBIN_USBIN_BOOST_VOTER, false, 0);
 unlock:
 	mutex_unlock(&chg->otg_oc_lock);
 	return rc;
@@ -1682,6 +1716,23 @@
 	return 0;
 }
 
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+				  union power_supply_propval *val)
+{
+	int rc;
+	u8 stat;
+
+	rc = smblib_read(chg, QNOVO_PT_ENABLE_CMD_REG, &stat);
+	if (rc < 0) {
+		smblib_err(chg, "Couldn't read QNOVO_PT_ENABLE_CMD rc=%d\n",
+			rc);
+		return rc;
+	}
+
+	val->intval = (bool)(stat & QNOVO_PT_ENABLE_CMD_BIT);
+	return 0;
+}
+
 /***********************
  * BATTERY PSY SETTERS *
  ***********************/
@@ -1733,6 +1784,10 @@
 		return -EINVAL;
 
 	chg->system_temp_level = val->intval;
+	/* disable parallel charging when a thermal mitigation level is set */
+	vote(chg->pl_disable_votable, THERMAL_DAEMON_VOTER,
+			chg->system_temp_level ? true : false, 0);
+
 	if (chg->system_temp_level == chg->thermal_levels)
 		return vote(chg->chg_disable_votable,
 			THERMAL_DAEMON_VOTER, true, 0);
@@ -1746,6 +1801,22 @@
 	return 0;
 }
 
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+				  const union power_supply_propval *val)
+{
+	int rc = 0;
+
+	rc = smblib_masked_write(chg, QNOVO_PT_ENABLE_CMD_REG,
+			QNOVO_PT_ENABLE_CMD_BIT,
+			val->intval ? QNOVO_PT_ENABLE_CMD_BIT : 0);
+	if (rc < 0) {
+		dev_err(chg->dev, "Couldn't enable qnovo rc=%d\n", rc);
+		return rc;
+	}
+
+	return rc;
+}
+
 int smblib_rerun_aicl(struct smb_charger *chg)
 {
 	int rc, settled_icl_ua;
@@ -1951,7 +2022,7 @@
 	int rc = 0;
 	u8 stat;
 
-	if (get_client_vote(chg->usb_icl_votable, USER_VOTER) == 0) {
+	if (get_client_vote_locked(chg->usb_icl_votable, USER_VOTER) == 0) {
 		val->intval = false;
 		return rc;
 	}
@@ -2445,10 +2516,6 @@
 		return rc;
 	}
 
-	if (chg->mode == PARALLEL_MASTER)
-		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER,
-		     min_uv > MICRO_5V, 0);
-
 	chg->voltage_min_uv = min_uv;
 	return rc;
 }
@@ -2468,10 +2535,6 @@
 	}
 
 	chg->voltage_max_uv = max_uv;
-	rc = smblib_rerun_aicl(chg);
-	if (rc < 0)
-		smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
-
 	return rc;
 }
 
@@ -2491,6 +2554,7 @@
 
 	vote(chg->apsd_disable_votable, PD_VOTER, pd_active, 0);
 	vote(chg->pd_allowed_votable, PD_VOTER, pd_active, 0);
+	vote(chg->usb_irq_enable_votable, PD_VOTER, pd_active, 0);
 
 	/*
 	 * VCONN_EN_ORIENTATION_BIT controls whether to use CC1 or CC2 line
@@ -2526,6 +2590,9 @@
 			return rc;
 		}
 
+		/* since PD was found the cable must be non-legacy */
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+
 		/* clear USB ICL vote for DCP_VOTER */
 		rc = vote(chg->usb_icl_votable, DCP_VOTER, false, 0);
 		if (rc < 0)
@@ -2533,29 +2600,12 @@
 				"Couldn't un-vote DCP from USB ICL rc=%d\n",
 				rc);
 
-		/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
-		rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-		if (rc < 0)
-			smblib_err(chg,
-					"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
-					rc);
-
 		/* remove USB_PSY_VOTER */
 		rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
 		if (rc < 0) {
 			smblib_err(chg, "Couldn't unvote USB_PSY rc=%d\n", rc);
 			return rc;
 		}
-
-		/* pd active set, parallel charger can be enabled now */
-		rc = vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
-				false, 0);
-		if (rc < 0) {
-			smblib_err(chg,
-				"Couldn't unvote PL_DELAY_HVDCP_VOTER rc=%d\n",
-				rc);
-			return rc;
-		}
 	}
 
 	/* CC pin selection s/w override in PD session; h/w otherwise. */
@@ -2643,12 +2693,6 @@
 
 static struct reg_info cc2_detach_settings[] = {
 	{
-		.reg	= TYPE_C_CFG_REG,
-		.mask	= APSD_START_ON_CC_BIT,
-		.val	= 0,
-		.desc	= "TYPE_C_CFG_REG",
-	},
-	{
 		.reg	= TYPE_C_CFG_2_REG,
 		.mask	= TYPE_C_UFP_MODE_BIT | EN_TRY_SOURCE_MODE_BIT,
 		.val	= TYPE_C_UFP_MODE_BIT,
@@ -2810,15 +2854,21 @@
 #define TYPEC_DEFAULT_CURRENT_MA	900000
 #define TYPEC_MEDIUM_CURRENT_MA		1500000
 #define TYPEC_HIGH_CURRENT_MA		3000000
-static int smblib_get_charge_current(struct smb_charger *chg,
+int smblib_get_charge_current(struct smb_charger *chg,
 				int *total_current_ua)
 {
 	const struct apsd_result *apsd_result = smblib_update_usb_type(chg);
 	union power_supply_propval val = {0, };
-	int rc, typec_source_rd, current_ua;
+	int rc = 0, typec_source_rd, current_ua;
 	bool non_compliant;
 	u8 stat5;
 
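+	/* with PD active, the negotiated current is just the PD vote */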
+	if (chg->pd_active) {
+		*total_current_ua =
+			get_client_vote_locked(chg->usb_icl_votable, PD_VOTER);
+		return rc;
+	}
+
 	rc = smblib_read(chg, TYPE_C_STATUS_5_REG, &stat5);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read TYPE_C_STATUS_5 rc=%d\n", rc);
@@ -2893,39 +2943,12 @@
 	return 0;
 }
 
-int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
-{
-	int current_ua, rc;
-
-	if (reduction_ua == 0) {
-		vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-	} else {
-		/*
-		 * No usb_icl voter means we are defaulting to hw chosen
-		 * max limit. We need a vote from s/w to enforce the reduction.
-		 */
-		if (get_effective_result(chg->usb_icl_votable) == -EINVAL) {
-			rc = smblib_get_charge_current(chg, &current_ua);
-			if (rc < 0) {
-				pr_err("Failed to get ICL rc=%d\n", rc);
-				return rc;
-			}
-			vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, true,
-					current_ua);
-		}
-	}
-
-	chg->icl_reduction_ua = reduction_ua;
-
-	return rerun_election(chg->usb_icl_votable);
-}
-
 /************************
  * PARALLEL PSY GETTERS *
  ************************/
 
 int smblib_get_prop_slave_current_now(struct smb_charger *chg,
-				      union power_supply_propval *pval)
+		union power_supply_propval *pval)
 {
 	if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
 		chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
@@ -2962,6 +2985,14 @@
 		return IRQ_HANDLED;
 	}
 
+	if (chg->wa_flags & OTG_WA) {
+		if (stat & OTG_OC_DIS_SW_STS_RT_STS_BIT)
+			smblib_err(chg, "OTG disabled by hw\n");
+
+		/* not handling software based hiccups for PM660 */
+		return IRQ_HANDLED;
+	}
+
 	if (stat & OTG_OVERCURRENT_RT_STS_BIT)
 		schedule_work(&chg->otg_oc_work);
 
@@ -2980,7 +3011,7 @@
 	rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
 	if (rc < 0) {
 		smblib_err(chg, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
-			rc);
+				rc);
 		return IRQ_HANDLED;
 	}
 
@@ -3085,6 +3116,7 @@
 	return IRQ_HANDLED;
 }
 
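+/* delay before enabling the parallel charger after USB insertion */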
+#define PL_DELAY_MS			30000
 irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
 {
 	struct smb_irq_data *irq_data = data;
@@ -3123,6 +3155,11 @@
 				smblib_err(chg, "Couldn't enable dpdm regulator rc=%d\n",
 					rc);
 		}
+
+		/* Schedule work to enable parallel charger */
+		vote(chg->awake_votable, PL_DELAY_VOTER, true, 0);
+		schedule_delayed_work(&chg->pl_enable_work,
+					msecs_to_jiffies(PL_DELAY_MS));
 	} else {
 		if (chg->wa_flags & BOOST_BACK_WA)
 			vote(chg->usb_icl_votable, BOOST_BACK_VOTER, false, 0);
@@ -3177,6 +3214,7 @@
 				|| (stat & AICL_DONE_BIT))
 			delay = 0;
 
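+		/* restart the debounce before scheduling the ICL change work */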
+		cancel_delayed_work_sync(&chg->icl_change_work);
 		schedule_delayed_work(&chg->icl_change_work,
 						msecs_to_jiffies(delay));
 	}
@@ -3286,11 +3324,19 @@
 	if (chg->mode == PARALLEL_MASTER)
 		vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
 
-	/* QC authentication done, parallel charger can be enabled now */
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
-
 	/* the APSD done handler will set the USB supply type */
 	apsd_result = smblib_get_apsd_result(chg);
+	if (get_effective_result(chg->hvdcp_hw_inov_dis_votable)) {
+		if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+			/* force HVDCP2 to 9V if INOV is disabled */
+			rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+					FORCE_9V_BIT, FORCE_9V_BIT);
+			if (rc < 0)
+				smblib_err(chg,
+					"Couldn't force 9V HVDCP rc=%d\n", rc);
+		}
+	}
+
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: hvdcp-3p0-auth-done rising; %s detected\n",
 		   apsd_result->name);
 }
@@ -3308,6 +3354,10 @@
 			/* could be a legacy cable, try doing hvdcp */
 			try_rerun_apsd_for_hvdcp(chg);
 
+		/* enable HDC and ICL irq for QC2/3 charger */
+		if (qc_charger)
+			vote(chg->usb_irq_enable_votable, QC_VOTER, true, 0);
+
 		/*
 		 * HVDCP detection timeout done
 		 * If adapter is not QC2.0/QC3.0 - it is a plain old DCP.
@@ -3316,15 +3366,6 @@
 			/* enforce DCP ICL if specified */
 			vote(chg->usb_icl_votable, DCP_VOTER,
 				chg->dcp_icl_ua != -EINVAL, chg->dcp_icl_ua);
-		/*
-		 * If adapter is not QC2.0/QC3.0 remove vote for parallel
-		 * disable.
-		 * Otherwise if adapter is QC2.0/QC3.0 wait for authentication
-		 * to complete.
-		 */
-		if (!qc_charger)
-			vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER,
-					false, 0);
 	}
 
 	smblib_dbg(chg, PR_INTERRUPT, "IRQ: smblib_handle_hvdcp_check_timeout %s\n",
@@ -3344,6 +3385,37 @@
 		   rising ? "rising" : "falling");
 }
 
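+/* seed the USB ICL with a safe default for the detected charger type */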
+static void smblib_force_legacy_icl(struct smb_charger *chg, int pst)
+{
+	switch (pst) {
+	case POWER_SUPPLY_TYPE_USB:
+		/*
+		 * USB_PSY will vote to increase the current to 500/900mA once
+		 * enumeration is done. Ensure that USB_PSY has at least voted
+		 * for 100mA before releasing the LEGACY_UNKNOWN vote
+		 */
+		if (!is_client_vote_enabled(chg->usb_icl_votable,
+								USB_PSY_VOTER))
+			vote(chg->usb_icl_votable, USB_PSY_VOTER, true, 100000);
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+		break;
+	case POWER_SUPPLY_TYPE_USB_CDP:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+		break;
+	case POWER_SUPPLY_TYPE_USB_DCP:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 1500000);
+		break;
+	case POWER_SUPPLY_TYPE_USB_HVDCP:
+	case POWER_SUPPLY_TYPE_USB_HVDCP_3:
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 3000000);
+		break;
+	default:
+		smblib_err(chg, "Unknown APSD %d; forcing 500mA\n", pst);
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 500000);
+		break;
+	}
+}
+
 #define HVDCP_DET_MS 2500
 static void smblib_handle_apsd_done(struct smb_charger *chg, bool rising)
 {
@@ -3353,6 +3425,10 @@
 		return;
 
 	apsd_result = smblib_update_usb_type(chg);
+
+	if (!chg->pd_active)
+		smblib_force_legacy_icl(chg, apsd_result->pst);
+
 	switch (apsd_result->bit) {
 	case SDP_CHARGER_BIT:
 	case CDP_CHARGER_BIT:
@@ -3365,13 +3441,9 @@
 		break;
 	case OCP_CHARGER_BIT:
 	case FLOAT_CHARGER_BIT:
-		/*
-		 * if not DCP then no hvdcp timeout happens. Enable
-		 * pd/parallel here.
-		 */
+		/* if not DCP then no hvdcp timeout happens; enable PD here */
 		vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER,
 				false, 0);
-		vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, false, 0);
 		break;
 	case DCP_CHARGER_BIT:
 		if (chg->wa_flags & QC_CHARGER_DETECTION_WA_BIT)
@@ -3437,6 +3509,9 @@
 {
 	int rc;
 
+	/* reset legacy unknown vote */
+	vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, false, 0);
+
 	/* reset both usbin current and voltage votes */
 	vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER, false, 0);
 	vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, false, 0);
@@ -3480,16 +3555,19 @@
 		smblib_err(chg,
 			"Couldn't un-vote DCP from USB ICL rc=%d\n", rc);
 
-	/* clear USB ICL vote for PL_USBIN_USBIN_VOTER */
-	rc = vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
-	if (rc < 0)
-		smblib_err(chg,
-			"Couldn't un-vote PL_USBIN_USBIN from USB ICL rc=%d\n",
-			rc);
 }
 
 static void typec_source_insertion(struct smb_charger *chg)
 {
+	/*
+	 * at any time we want LEGACY_UNKNOWN, PD, or USB_PSY to be voting for
+	 * ICL, so vote LEGACY_UNKNOWN here if none of the above three have
+	 * cast their votes
+	 */
+	if (!is_client_vote_enabled(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER)
+		&& !is_client_vote_enabled(chg->usb_icl_votable, PD_VOTER)
+		&& !is_client_vote_enabled(chg->usb_icl_votable, USB_PSY_VOTER))
+		vote(chg->usb_icl_votable, LEGACY_UNKNOWN_VOTER, true, 100000);
 }
 
 static void typec_sink_insertion(struct smb_charger *chg)
@@ -3510,11 +3588,16 @@
 
 static void smblib_handle_typec_removal(struct smb_charger *chg)
 {
+	int rc;
+
+	cancel_delayed_work_sync(&chg->pl_enable_work);
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, true, 0);
 	vote(chg->pd_disallowed_votable_indirect, HVDCP_TIMEOUT_VOTER, true, 0);
-	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER, true, 0);
-	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER, true, 0);
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+	vote(chg->usb_irq_enable_votable, PD_VOTER, false, 0);
+	vote(chg->usb_irq_enable_votable, QC_VOTER, false, 0);
 
 	/* reset votes from vbus_cc_short */
 	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
@@ -3532,10 +3615,13 @@
 	chg->pulse_cnt = 0;
 	chg->usb_icl_delta_ua = 0;
 
-	chg->usb_ever_removed = true;
+	/* enable APSD CC trigger for next insertion */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG,
+				APSD_START_ON_CC_BIT, APSD_START_ON_CC_BIT);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't enable APSD_START_ON_CC rc=%d\n", rc);
 
 	smblib_update_usb_type(chg);
-
 	typec_source_removal(chg);
 	typec_sink_removal(chg);
 }
@@ -3543,12 +3629,16 @@
 static void smblib_handle_typec_insertion(struct smb_charger *chg,
 		bool sink_attached, bool legacy_cable)
 {
-	int rp;
-	bool vbus_cc_short = false;
-	bool valid_legacy_cable;
+	int rp, rc;
 
 	vote(chg->pd_disallowed_votable_indirect, CC_DETACHED_VOTER, false, 0);
 
+	/* disable APSD CC trigger since CC is attached */
+	rc = smblib_masked_write(chg, TYPE_C_CFG_REG, APSD_START_ON_CC_BIT, 0);
+	if (rc < 0)
+		smblib_err(chg, "Couldn't disable APSD_START_ON_CC rc=%d\n",
+									rc);
+
 	if (sink_attached) {
 		typec_source_removal(chg);
 		typec_sink_insertion(chg);
@@ -3557,25 +3647,16 @@
 		typec_sink_removal(chg);
 	}
 
-	valid_legacy_cable = legacy_cable &&
-		(chg->usb_ever_removed || !smblib_sysok_reason_usbin(chg));
-	vote(chg->pd_disallowed_votable_indirect, LEGACY_CABLE_VOTER,
-			valid_legacy_cable, 0);
-
-	if (valid_legacy_cable) {
-		rp = smblib_get_prop_ufp_mode(chg);
-		if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
-				|| rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
-			vbus_cc_short = true;
-			smblib_err(chg, "Disabling PD and HVDCP, VBUS-CC shorted, rp = %d found\n",
-					rp);
-		}
+	rp = smblib_get_prop_ufp_mode(chg);
+	if (rp == POWER_SUPPLY_TYPEC_SOURCE_HIGH
+			|| rp == POWER_SUPPLY_TYPEC_NON_COMPLIANT) {
+		smblib_dbg(chg, PR_MISC, "VBUS & CC could be shorted; keeping HVDCP disabled\n");
+		vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+								true, 0);
+	} else {
+		vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
+								false, 0);
 	}
-
-	vote(chg->hvdcp_disable_votable_indirect, VBUS_CC_SHORT_VOTER,
-			vbus_cc_short, 0);
-	vote(chg->pd_disallowed_votable_indirect, VBUS_CC_SHORT_VOTER,
-			vbus_cc_short, 0);
 }
 
 static void smblib_handle_typec_debounce_done(struct smb_charger *chg,
@@ -3978,6 +4059,9 @@
 	int rc, i;
 	u8 stat;
 
+	if (chg->micro_usb_mode)
+		return;
+
 	smblib_err(chg, "over-current detected on VCONN\n");
 	if (!chg->vconn_vreg || !chg->vconn_vreg->rdev)
 		return;
@@ -4072,6 +4156,16 @@
 	smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
 }
 
+static void smblib_pl_enable_work(struct work_struct *work)
+{
+	struct smb_charger *chg = container_of(work, struct smb_charger,
+							pl_enable_work.work);
+
+	smblib_dbg(chg, PR_PARALLEL, "timer expired, enabling parallel\n");
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, false, 0);
+	vote(chg->awake_votable, PL_DELAY_VOTER, false, 0);
+}
+
 static int smblib_create_votables(struct smb_charger *chg)
 {
 	int rc = 0;
@@ -4088,13 +4182,19 @@
 		return rc;
 	}
 
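+	/* USB_ICL is now created elsewhere; defer probe until it appears */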
+	chg->usb_icl_votable = find_votable("USB_ICL");
+	if (!chg->usb_icl_votable) {
+		rc = -EPROBE_DEFER;
+		return rc;
+	}
+
 	chg->pl_disable_votable = find_votable("PL_DISABLE");
 	if (!chg->pl_disable_votable) {
 		rc = -EPROBE_DEFER;
 		return rc;
 	}
 	vote(chg->pl_disable_votable, PL_INDIRECT_VOTER, true, 0);
-	vote(chg->pl_disable_votable, PL_DELAY_HVDCP_VOTER, true, 0);
+	vote(chg->pl_disable_votable, PL_DELAY_VOTER, true, 0);
 
 	chg->dc_suspend_votable = create_votable("DC_SUSPEND", VOTE_SET_ANY,
 					smblib_dc_suspend_vote_callback,
@@ -4104,14 +4204,6 @@
 		return rc;
 	}
 
-	chg->usb_icl_votable = create_votable("USB_ICL", VOTE_MIN,
-					smblib_usb_icl_vote_callback,
-					chg);
-	if (IS_ERR(chg->usb_icl_votable)) {
-		rc = PTR_ERR(chg->usb_icl_votable);
-		return rc;
-	}
-
 	chg->dc_icl_votable = create_votable("DC_ICL", VOTE_MIN,
 					smblib_dc_icl_vote_callback,
 					chg);
@@ -4197,6 +4289,15 @@
 		return rc;
 	}
 
+	chg->usb_irq_enable_votable = create_votable("USB_IRQ_DISABLE",
+					VOTE_SET_ANY,
+					smblib_usb_irq_enable_vote_callback,
+					chg);
+	if (IS_ERR(chg->usb_irq_enable_votable)) {
+		rc = PTR_ERR(chg->usb_irq_enable_votable);
+		return rc;
+	}
+
 	return rc;
 }
 
@@ -4253,6 +4354,7 @@
 	INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
 	INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
 	INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
+	INIT_DELAYED_WORK(&chg->pl_enable_work, smblib_pl_enable_work);
 	chg->fake_capacity = -EINVAL;
 
 	switch (chg->mode) {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index 21ccd3c..49b9d3d 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -32,10 +32,12 @@
 #define USER_VOTER			"USER_VOTER"
 #define PD_VOTER			"PD_VOTER"
 #define DCP_VOTER			"DCP_VOTER"
+#define QC_VOTER			"QC_VOTER"
 #define PL_USBIN_USBIN_VOTER		"PL_USBIN_USBIN_VOTER"
 #define USB_PSY_VOTER			"USB_PSY_VOTER"
 #define PL_TAPER_WORK_RUNNING_VOTER	"PL_TAPER_WORK_RUNNING_VOTER"
 #define PL_INDIRECT_VOTER		"PL_INDIRECT_VOTER"
+#define PL_QNOVO_VOTER			"PL_QNOVO_VOTER"
 #define USBIN_I_VOTER			"USBIN_I_VOTER"
 #define USBIN_V_VOTER			"USBIN_V_VOTER"
 #define CHG_STATE_VOTER			"CHG_STATE_VOTER"
@@ -47,17 +49,18 @@
 #define PD_DISALLOWED_INDIRECT_VOTER	"PD_DISALLOWED_INDIRECT_VOTER"
 #define PD_HARD_RESET_VOTER		"PD_HARD_RESET_VOTER"
 #define VBUS_CC_SHORT_VOTER		"VBUS_CC_SHORT_VOTER"
-#define LEGACY_CABLE_VOTER		"LEGACY_CABLE_VOTER"
 #define PD_INACTIVE_VOTER		"PD_INACTIVE_VOTER"
 #define BOOST_BACK_VOTER		"BOOST_BACK_VOTER"
+#define USBIN_USBIN_BOOST_VOTER		"USBIN_USBIN_BOOST_VOTER"
 #define HVDCP_INDIRECT_VOTER		"HVDCP_INDIRECT_VOTER"
 #define MICRO_USB_VOTER			"MICRO_USB_VOTER"
 #define DEBUG_BOARD_VOTER		"DEBUG_BOARD_VOTER"
 #define PD_SUSPEND_SUPPORTED_VOTER	"PD_SUSPEND_SUPPORTED_VOTER"
-#define PL_DELAY_HVDCP_VOTER		"PL_DELAY_HVDCP_VOTER"
+#define PL_DELAY_VOTER			"PL_DELAY_VOTER"
 #define CTM_VOTER			"CTM_VOTER"
 #define SW_QC3_VOTER			"SW_QC3_VOTER"
 #define AICL_RERUN_VOTER		"AICL_RERUN_VOTER"
+#define LEGACY_UNKNOWN_VOTER		"LEGACY_UNKNOWN_VOTER"
 
 #define VCONN_MAX_ATTEMPTS	3
 #define OTG_MAX_ATTEMPTS	3
@@ -80,6 +83,7 @@
 	BOOST_BACK_WA			= BIT(1),
 	TYPEC_CC2_REMOVAL_WA_BIT	= BIT(2),
 	QC_AUTH_INTERRUPT_WA_BIT	= BIT(3),
+	OTG_WA				= BIT(4),
 };
 
 enum smb_irq_index {
@@ -271,6 +275,7 @@
 	struct votable		*hvdcp_enable_votable;
 	struct votable		*apsd_disable_votable;
 	struct votable		*hvdcp_hw_inov_dis_votable;
+	struct votable		*usb_irq_enable_votable;
 
 	/* work */
 	struct work_struct	bms_update_work;
@@ -283,6 +288,7 @@
 	struct work_struct	vconn_oc_work;
 	struct delayed_work	otg_ss_done_work;
 	struct delayed_work	icl_change_work;
+	struct delayed_work	pl_enable_work;
 
 	/* cached status */
 	int			voltage_min_uv;
@@ -305,17 +311,16 @@
 	int			otg_attempts;
 	int			vconn_attempts;
 	int			default_icl_ua;
+	int			otg_cl_ua;
 
 	/* workaround flag */
 	u32			wa_flags;
 	enum cc2_sink_type	cc2_sink_detach_flag;
 	int			boost_current_ua;
+	int			temp_speed_reading_count;
 
 	/* extcon for VBUS / ID notification to USB for uUSB */
 	struct extcon_dev	*extcon;
-	bool			usb_ever_removed;
-
-	int			icl_reduction_ua;
 
 	/* qnovo */
 	int			qnovo_fcc_ua;
@@ -453,6 +458,8 @@
 				union power_supply_propval *val);
 int smblib_get_prop_die_health(struct smb_charger *chg,
 			       union power_supply_propval *val);
+int smblib_get_prop_charge_qnovo_enable(struct smb_charger *chg,
+			       union power_supply_propval *val);
 int smblib_set_prop_pd_current_max(struct smb_charger *chg,
 				const union power_supply_propval *val);
 int smblib_set_prop_usb_current_max(struct smb_charger *chg,
@@ -473,14 +480,17 @@
 				union power_supply_propval *val);
 int smblib_set_prop_ship_mode(struct smb_charger *chg,
 				const union power_supply_propval *val);
+int smblib_set_prop_charge_qnovo_enable(struct smb_charger *chg,
+				const union power_supply_propval *val);
 void smblib_suspend_on_debug_battery(struct smb_charger *chg);
 int smblib_rerun_apsd_if_required(struct smb_charger *chg);
 int smblib_get_prop_fcc_delta(struct smb_charger *chg,
 			       union power_supply_propval *val);
 int smblib_icl_override(struct smb_charger *chg, bool override);
-int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua);
 int smblib_dp_dm(struct smb_charger *chg, int val);
 int smblib_rerun_aicl(struct smb_charger *chg);
+int smblib_set_icl_current(struct smb_charger *chg, int icl_ua);
+int smblib_get_charge_current(struct smb_charger *chg, int *total_current_ua);
 
 int smblib_init(struct smb_charger *chg);
 int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index f4afb80..167666a 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -628,6 +628,7 @@
 
 #define USBIN_LOAD_CFG_REG			(USBIN_BASE + 0x65)
 #define USBIN_OV_CH_LOAD_OPTION_BIT		BIT(7)
+#define ICL_OVERRIDE_AFTER_APSD_BIT		BIT(4)
 
 #define USBIN_ICL_OPTIONS_REG			(USBIN_BASE + 0x66)
 #define CFG_USB3P0_SEL_BIT			BIT(2)
@@ -918,6 +919,7 @@
 
 #define MISC_CFG_REG				(MISC_BASE + 0x52)
 #define GSM_PA_ON_ADJ_SEL_BIT			BIT(0)
+#define STAT_PARALLEL_1400MA_EN_CFG_BIT		BIT(3)
 #define TCC_DEBOUNCE_20MS_BIT			BIT(5)
 
 #define SNARL_BARK_BITE_WD_CFG_REG		(MISC_BASE + 0x53)
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 1e89a721..83374bb 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -28,7 +28,7 @@
 #include "smb-reg.h"
 #include "smb-lib.h"
 #include "storm-watch.h"
-#include "pmic-voter.h"
+#include <linux/pmic-voter.h>
 
 #define SMB138X_DEFAULT_FCC_UA 1000000
 #define SMB138X_DEFAULT_ICL_UA 1500000
@@ -45,6 +45,7 @@
 #define STACKED_DIODE_EN_BIT		BIT(2)
 
 #define TDIE_AVG_COUNT	10
+#define MAX_SPEED_READING_TIMES		5
 
 enum {
 	OOB_COMP_WA_BIT = BIT(0),
@@ -95,6 +96,7 @@
 	int	dc_icl_ua;
 	int	chg_temp_max_mdegc;
 	int	connector_temp_max_mdegc;
+	int	pl_mode;
 };
 
 struct smb138x {
@@ -126,8 +128,16 @@
 	union power_supply_propval pval;
 	int rc = 0, avg = 0, i;
 	struct smb_charger *chg = &chip->chg;
+	int die_avg_count;
 
-	for (i = 0; i < TDIE_AVG_COUNT; i++) {
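+	/* use single samples for the first few reads so results return quickly */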
+	if (chg->temp_speed_reading_count < MAX_SPEED_READING_TIMES) {
+		chg->temp_speed_reading_count++;
+		die_avg_count = 1;
+	} else {
+		die_avg_count = TDIE_AVG_COUNT;
+	}
+
+	for (i = 0; i < die_avg_count; i++) {
 		pval.intval = 0;
 		rc = smblib_get_prop_charger_temp(chg, &pval);
 		if (rc < 0) {
@@ -137,7 +147,7 @@
 		}
 		avg += pval.intval;
 	}
-	val->intval = avg / TDIE_AVG_COUNT;
+	val->intval = avg / die_avg_count;
 	return rc;
 }
 
@@ -152,6 +162,11 @@
 		return -EINVAL;
 	}
 
+	rc = of_property_read_u32(node,
+				"qcom,parallel-mode", &chip->dt.pl_mode);
+	if (rc < 0)
+		chip->dt.pl_mode = POWER_SUPPLY_PL_USBMID_USBMID;
+
 	chip->dt.suspend_input = of_property_read_bool(node,
 				"qcom,suspend-input");
 
@@ -520,6 +535,8 @@
 	POWER_SUPPLY_PROP_CHARGING_ENABLED,
 	POWER_SUPPLY_PROP_PIN_ENABLED,
 	POWER_SUPPLY_PROP_INPUT_SUSPEND,
+	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
+	POWER_SUPPLY_PROP_CURRENT_MAX,
 	POWER_SUPPLY_PROP_VOLTAGE_MAX,
 	POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX,
 	POWER_SUPPLY_PROP_CURRENT_NOW,
@@ -559,6 +576,21 @@
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smblib_get_usb_suspend(chg, &val->intval);
 		break;
+	case POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_get_prop_input_current_limited(chg, val);
+		else
+			val->intval = 0;
+		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_get_charge_param(chg, &chg->param.usb_icl,
+				&val->intval);
+		else
+			val->intval = 0;
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_get_charge_param(chg, &chg->param.fv, &val->intval);
 		break;
@@ -579,7 +611,7 @@
 		val->strval = "smb138x";
 		break;
 	case POWER_SUPPLY_PROP_PARALLEL_MODE:
-		val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
+		val->intval = chip->dt.pl_mode;
 		break;
 	case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
 		val->intval = smb138x_get_prop_connector_health(chip);
@@ -638,6 +670,12 @@
 	case POWER_SUPPLY_PROP_INPUT_SUSPEND:
 		rc = smb138x_set_parallel_suspend(chip, (bool)val->intval);
 		break;
+	case POWER_SUPPLY_PROP_CURRENT_MAX:
+		if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
+			rc = smblib_set_charge_param(chg, &chg->param.usb_icl,
+				val->intval);
+		break;
 	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
 		rc = smblib_set_charge_param(chg, &chg->param.fv, val->intval);
 		break;
@@ -1449,6 +1487,16 @@
 		goto cleanup;
 	}
 
+	if ((chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+		|| (chip->dt.pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+		rc = smb138x_init_vbus_regulator(chip);
+		if (rc < 0) {
+			pr_err("Couldn't initialize vbus regulator rc=%d\n",
+				rc);
+			return rc;
+		}
+	}
+
 	rc = smb138x_init_parallel_psy(chip);
 	if (rc < 0) {
 		pr_err("Couldn't initialize parallel psy rc=%d\n", rc);
@@ -1473,6 +1521,8 @@
 	smblib_deinit(chg);
 	if (chip->parallel_psy)
 		power_supply_unregister(chip->parallel_psy);
+	if (chg->vbus_vreg && chg->vbus_vreg->rdev)
+		regulator_unregister(chg->vbus_vreg->rdev);
 	return rc;
 }
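
A note on the smb138x_get_prop_charger_temp() change above: it introduces a
warm-up scheme so the first MAX_SPEED_READING_TIMES queries return a single
raw sample (fast boot-time reads) and later queries smooth over
TDIE_AVG_COUNT samples. A minimal standalone sketch of that policy (the
read_temp callback and warmup_count are illustrative, not driver code):

#define TDIE_AVG_COUNT			10
#define MAX_SPEED_READING_TIMES		5

/* Illustrative stand-in for smblib_get_prop_charger_temp(). */
typedef int (*read_temp_fn)(void);

static int warmup_count;

/* One sample during warm-up, a TDIE_AVG_COUNT average afterwards. */
static int read_die_temp_avg(read_temp_fn read_temp)
{
	int i, avg = 0;
	int n = TDIE_AVG_COUNT;

	if (warmup_count < MAX_SPEED_READING_TIMES) {
		warmup_count++;
		n = 1;
	}

	for (i = 0; i < n; i++)
		avg += read_temp();

	return avg / n;
}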
 
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index d326b80..ee23fc7 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -32,6 +32,7 @@
 #include "ufs_quirks.h"
 #include "ufs-qcom-ice.h"
 #include "ufs-qcom-debugfs.h"
+#include <linux/clk/qcom.h>
 
 #define MAX_PROP_SIZE		   32
 #define VDDP_REF_CLK_MIN_UV        1200000
@@ -356,6 +357,28 @@
 	return err;
 }
 
+static void ufs_qcom_force_mem_config(struct ufs_hba *hba)
+{
+	struct ufs_clk_info *clki;
+
+	/*
+	 * Configure the core/peripheral memory retention behavior of
+	 * the UFS clocks for when they are turned off.
+	 * This configuration is required so that the ICE crypto
+	 * configuration (including keys) is retained while
+	 * core_clk_ice is turned off, and so that the non-ICE RAMs
+	 * of the host controller can be powered down.
+	 */
+	list_for_each_entry(clki, &hba->clk_list_head, list) {
+		if (!strcmp(clki->name, "core_clk_ice"))
+			clk_set_flags(clki->clk, CLKFLAG_RETAIN_MEM);
+		else
+			clk_set_flags(clki->clk, CLKFLAG_NORETAIN_MEM);
+		clk_set_flags(clki->clk, CLKFLAG_NORETAIN_PERIPH);
+		clk_set_flags(clki->clk, CLKFLAG_PERIPH_OFF_CLEAR);
+	}
+}
+
 static int ufs_qcom_hce_enable_notify(struct ufs_hba *hba,
 				      enum ufs_notify_change_status status)
 {
@@ -364,6 +387,7 @@
 
 	switch (status) {
 	case PRE_CHANGE:
+		ufs_qcom_force_mem_config(hba);
 		ufs_qcom_power_up_sequence(hba);
 		/*
 		 * The PHY PLL output is the source of tx/rx lane symbol
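
The ufs_qcom_force_mem_config() helper added above drives the msm-specific
clk_set_flags() interface once per clock. As a rough sketch under the same
assumptions (<linux/clk/qcom.h> and a clk handle; the function name is
illustrative), retaining core memory for the ICE clock while letting its
peripheral logic collapse looks like:

#include <linux/clk.h>
#include <linux/clk/qcom.h>

/*
 * Keep the RAM contents behind "core_clk_ice" across clock gating so
 * the ICE key LUT survives, but power down the peripheral logic.
 */
static void retain_ice_mem(struct clk *core_clk_ice)
{
	clk_set_flags(core_clk_ice, CLKFLAG_RETAIN_MEM);
	clk_set_flags(core_clk_ice, CLKFLAG_NORETAIN_PERIPH);
	clk_set_flags(core_clk_ice, CLKFLAG_PERIPH_OFF_CLEAR);
}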
diff --git a/drivers/tty/serial/msm_geni_serial.c b/drivers/tty/serial/msm_geni_serial.c
index 7c4654c..5b48323 100644
--- a/drivers/tty/serial/msm_geni_serial.c
+++ b/drivers/tty/serial/msm_geni_serial.c
@@ -92,7 +92,7 @@
 #define UART_OVERSAMPLING	(32)
 #define STALE_TIMEOUT		(16)
 #define GENI_UART_NR_PORTS	(15)
-#define DEF_FIFO_DEPTH_WORDS	(64)
+#define DEF_FIFO_DEPTH_WORDS	(16)
 #define DEF_FIFO_WIDTH_BITS	(32)
 
 struct msm_geni_serial_port {
@@ -210,8 +210,8 @@
 	unsigned int reg;
 	bool met = false;
 
-	while (iter < 100) {
-		reg = geni_read_reg(uport->membase, offset);
+	while (iter < 1000) {
+		reg = geni_read_reg_nolog(uport->membase, offset);
 		if (reg & bit_field) {
 			met = true;
 			break;
@@ -225,7 +225,7 @@
 static void msm_geni_serial_setup_tx(struct uart_port *uport,
 					unsigned int xmit_size)
 {
-	geni_write_reg(xmit_size, uport->membase, SE_UART_TX_TRANS_LEN);
+	geni_write_reg_nolog(xmit_size, uport->membase, SE_UART_TX_TRANS_LEN);
 	geni_setup_m_cmd(uport->membase, UART_START_TX, 0);
 	/*
 	 * Writes to enable the primary sequencer should go through before
@@ -252,7 +252,7 @@
 								M_CMD_ABORT_EN);
 		}
 	}
-	geni_write_reg(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(irq_clear, uport->membase, SE_GENI_M_IRQ_CLEAR);
 }
 
 #ifdef CONFIG_CONSOLE_POLL
@@ -268,10 +268,12 @@
 		return -ENXIO;
 	}
 
-	m_irq_status = geni_read_reg(uport->membase, SE_GENI_M_IRQ_STATUS);
-	s_irq_status = geni_read_reg(uport->membase, SE_GENI_S_IRQ_STATUS);
-	geni_write_reg(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
-	geni_write_reg(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+	m_irq_status = geni_read_reg_nolog(uport->membase,
+						SE_GENI_M_IRQ_STATUS);
+	s_irq_status = geni_read_reg_nolog(uport->membase,
+						SE_GENI_S_IRQ_STATUS);
+	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
 
 	if (!(msm_geni_serial_poll_bit(uport, SE_GENI_RX_FIFO_STATUS,
 			RX_FIFO_WC_MSK))) {
@@ -284,7 +286,7 @@
 	 * getting valid RX fifo status.
 	 */
 	mb();
-	rx_fifo = geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
+	rx_fifo = geni_read_reg_nolog(uport->membase, SE_GENI_RX_FIFOn);
 	rx_fifo &= 0xFF;
 	return rx_fifo;
 }
@@ -296,14 +298,14 @@
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
 	se_config_packing(uport->membase, 8, 1, false);
-	geni_write_reg(port->tx_wm, uport->membase,
+	geni_write_reg_nolog(port->tx_wm, uport->membase,
 					SE_GENI_TX_WATERMARK_REG);
 	msm_geni_serial_setup_tx(uport, 1);
 	if (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
 				M_TX_FIFO_WATERMARK_EN))
 		WARN_ON(1);
-	geni_write_reg(b, uport->membase, SE_GENI_TX_FIFOn);
-	geni_write_reg(M_TX_FIFO_WATERMARK_EN, uport->membase,
+	geni_write_reg_nolog(b, uport->membase, SE_GENI_TX_FIFOn);
+	geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
 							SE_GENI_M_IRQ_CLEAR);
 	/*
 	 * Ensure the FIFO write goes through before polling for status.
@@ -316,7 +318,7 @@
 #if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
 static void msm_geni_serial_wr_char(struct uart_port *uport, int ch)
 {
-	geni_write_reg(ch, uport->membase, SE_GENI_TX_FIFOn);
+	geni_write_reg_nolog(ch, uport->membase, SE_GENI_TX_FIFOn);
 	/*
 	 * Ensure FIFO write clear goes through before
 	 * next iteration.
@@ -341,7 +343,7 @@
 
 	bytes_to_send += new_line;
 	se_config_packing(uport->membase, 8, 1, false);
-	geni_write_reg(port->tx_wm, uport->membase,
+	geni_write_reg_nolog(port->tx_wm, uport->membase,
 					SE_GENI_TX_WATERMARK_REG);
 	msm_geni_serial_setup_tx(uport, bytes_to_send);
 	i = 0;
@@ -350,7 +352,7 @@
 		u32 avail_fifo_bytes = (port->tx_fifo_depth - port->tx_wm);
 
 		while (!msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
-							M_TX_FIFO_WATERMARK_EN))
+						M_TX_FIFO_WATERMARK_EN))
 			cpu_relax();
 		chars_to_write = min((unsigned int)(count - i),
 							avail_fifo_bytes);
@@ -358,8 +360,10 @@
 			chars_to_write = (avail_fifo_bytes >> 1);
 		uart_console_write(uport, (s + i), chars_to_write,
 						msm_geni_serial_wr_char);
-		geni_write_reg(M_TX_FIFO_WATERMARK_EN, uport->membase,
+		geni_write_reg_nolog(M_TX_FIFO_WATERMARK_EN, uport->membase,
 							SE_GENI_M_IRQ_CLEAR);
+		/* Ensure this goes through before polling for WM IRQ again. */
+		mb();
 		i += chars_to_write;
 	}
 	msm_geni_serial_poll_cancel_tx(uport);
@@ -401,7 +405,7 @@
 		int bytes = 4;
 
 		*(msm_port->rx_fifo) =
-			geni_read_reg(uport->membase, SE_GENI_RX_FIFOn);
+			geni_read_reg_nolog(uport->membase, SE_GENI_RX_FIFOn);
 		rx_char = (unsigned char *)msm_port->rx_fifo;
 
 		if (i == (rx_fifo_wc - 1)) {
@@ -437,12 +441,13 @@
 	unsigned int geni_m_irq_en;
 	struct msm_geni_serial_port *port = GET_DEV_PORT(uport);
 
-	geni_m_irq_en = geni_read_reg(uport->membase, SE_GENI_M_IRQ_EN);
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	geni_m_irq_en |= M_TX_FIFO_WATERMARK_EN;
 
 	se_config_packing(uport->membase, 8, 4, false);
-	geni_write_reg(port->tx_wm, uport->membase, SE_GENI_TX_WATERMARK_REG);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(port->tx_wm, uport->membase,
+						SE_GENI_TX_WATERMARK_REG);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 	/* Geni command setup/irq enables should complete before returning.*/
 	mb();
 }
@@ -452,12 +457,12 @@
 	unsigned int geni_m_irq_en;
 	unsigned int geni_status;
 
-	geni_m_irq_en = geni_read_reg(uport->membase, SE_GENI_M_IRQ_EN);
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase, SE_GENI_M_IRQ_EN);
 	geni_m_irq_en &= ~(M_TX_FIFO_WATERMARK_EN | M_CMD_DONE_EN);
-	geni_write_reg(0, uport->membase, SE_GENI_TX_WATERMARK_REG);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(0, uport->membase, SE_GENI_TX_WATERMARK_REG);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 
-	geni_status = geni_read_reg(uport->membase,
+	geni_status = geni_read_reg_nolog(uport->membase,
 						SE_GENI_STATUS);
 	/* It is possible that stop_tx is called multiple times. */
 	if (!(geni_status & M_GENI_CMD_ACTIVE))
@@ -469,10 +474,10 @@
 		geni_abort_m_cmd(uport->membase);
 		msm_geni_serial_poll_bit(uport, SE_GENI_M_IRQ_STATUS,
 							M_CMD_ABORT_EN);
-		geni_write_reg(M_CMD_ABORT_EN, uport->membase,
+		geni_write_reg_nolog(M_CMD_ABORT_EN, uport->membase,
 							SE_GENI_M_IRQ_CLEAR);
 	}
-	geni_write_reg(M_CMD_CANCEL_EN, uport, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(M_CMD_CANCEL_EN, uport->membase,
+						SE_GENI_M_IRQ_CLEAR);
 }
 
 static void msm_geni_serial_start_rx(struct uart_port *uport)
@@ -480,16 +485,16 @@
 	unsigned int geni_s_irq_en;
 	unsigned int geni_m_irq_en;
 
-	geni_s_irq_en = geni_read_reg(uport->membase,
+	geni_s_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_EN);
-	geni_m_irq_en = geni_read_reg(uport->membase,
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_M_IRQ_EN);
 	geni_s_irq_en |= S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN;
 	geni_m_irq_en |= M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN;
 
 	geni_setup_s_cmd(uport->membase, UART_START_READ, 0);
-	geni_write_reg(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 	/*
 	 * Ensure the writes to the secondary sequencer and interrupt enables
 	 * go through.
@@ -503,21 +508,21 @@
 	unsigned int geni_m_irq_en;
 	unsigned int geni_status;
 
-	geni_s_irq_en = geni_read_reg(uport->membase,
+	geni_s_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_S_IRQ_EN);
-	geni_m_irq_en = geni_read_reg(uport->membase,
+	geni_m_irq_en = geni_read_reg_nolog(uport->membase,
 						SE_GENI_M_IRQ_EN);
 	geni_s_irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
 	geni_m_irq_en &= ~(M_RX_FIFO_WATERMARK_EN | M_RX_FIFO_LAST_EN);
 
-	geni_write_reg(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
-	geni_write_reg(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
+	geni_write_reg_nolog(geni_s_irq_en, uport->membase, SE_GENI_S_IRQ_EN);
+	geni_write_reg_nolog(geni_m_irq_en, uport->membase, SE_GENI_M_IRQ_EN);
 
-	geni_status = geni_read_reg(uport->membase, SE_GENI_STATUS);
+	geni_status = geni_read_reg_nolog(uport->membase, SE_GENI_STATUS);
 	/* It is possible that stop_rx is called multiple times. */
 	if (!(geni_status & S_GENI_CMD_ACTIVE))
 		return;
-	geni_write_reg(S_GENI_CMD_CANCEL, uport->membase,
+	geni_write_reg_nolog(S_GENI_CMD_CANCEL, uport->membase,
 						SE_GENI_S_CMD_CTRL_REG);
 	if (!msm_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
 							S_CMD_CANCEL_EN))
@@ -566,7 +571,7 @@
 	struct msm_geni_serial_port *msm_port = GET_DEV_PORT(uport);
 
 	tport = &uport->state->port;
-	rx_fifo_status = geni_read_reg(uport->membase,
+	rx_fifo_status = geni_read_reg_nolog(uport->membase,
 				SE_GENI_RX_FIFO_STATUS);
 	rx_fifo_wc = rx_fifo_status & RX_FIFO_WC_MSK;
 	rx_last_byte_valid = ((rx_fifo_status & RX_LAST_BYTE_VALID_MSK) >>
@@ -590,7 +595,7 @@
 	unsigned int xmit_size;
 	unsigned int fifo_width_bytes = msm_port->tx_fifo_width >> 3;
 
-	tx_fifo_status = geni_read_reg(uport->membase,
+	tx_fifo_status = geni_read_reg_nolog(uport->membase,
 					SE_GENI_TX_FIFO_STATUS);
 	if (uart_circ_empty(xmit) && !tx_fifo_status) {
 		msm_geni_serial_stop_tx(uport);
@@ -621,7 +626,7 @@
 
 		for (c = 0; c < tx_bytes ; c++)
 			buf |= (xmit->buf[xmit->tail + c] << (c * 8));
-		geni_write_reg(buf, uport->membase, SE_GENI_TX_FIFOn);
+		geni_write_reg_nolog(buf, uport->membase, SE_GENI_TX_FIFOn);
 		xmit->tail = (xmit->tail + tx_bytes) & (UART_XMIT_SIZE - 1);
 		i += tx_bytes;
 		uport->icount.tx += tx_bytes;
@@ -642,10 +647,12 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&uport->lock, flags);
-	m_irq_status = geni_read_reg(uport->membase, SE_GENI_M_IRQ_STATUS);
-	s_irq_status = geni_read_reg(uport->membase, SE_GENI_S_IRQ_STATUS);
-	geni_write_reg(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
-	geni_write_reg(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
+	m_irq_status = geni_read_reg_nolog(uport->membase,
+							SE_GENI_M_IRQ_STATUS);
+	s_irq_status = geni_read_reg_nolog(uport->membase,
+							SE_GENI_S_IRQ_STATUS);
+	geni_write_reg_nolog(m_irq_status, uport->membase, SE_GENI_M_IRQ_CLEAR);
+	geni_write_reg_nolog(s_irq_status, uport->membase, SE_GENI_S_IRQ_CLEAR);
 
 	if ((m_irq_status & M_ILLEGAL_CMD_EN)) {
 		WARN_ON(1);
@@ -814,17 +821,24 @@
 		u32 rx_parity_cfg, u32 bits_per_char, u32 stop_bit_len,
 		u32 rxstale, u32 s_clk_cfg)
 {
-	geni_write_reg(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
-	geni_write_reg(tx_trans_cfg, uport->membase, SE_UART_TX_TRANS_CFG);
-	geni_write_reg(tx_parity_cfg, uport->membase, SE_UART_TX_PARITY_CFG);
-	geni_write_reg(rx_trans_cfg, uport->membase, SE_UART_RX_TRANS_CFG);
-	geni_write_reg(rx_parity_cfg, uport->membase, SE_UART_RX_PARITY_CFG);
-	geni_write_reg(bits_per_char, uport->membase, SE_UART_TX_WORD_LEN);
-	geni_write_reg(bits_per_char, uport->membase, SE_UART_RX_WORD_LEN);
-	geni_write_reg(stop_bit_len, uport->membase, SE_UART_TX_STOP_BIT_LEN);
-	geni_write_reg(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
-	geni_write_reg(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
-	geni_write_reg(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
+	geni_write_reg_nolog(loopback, uport->membase, SE_UART_LOOPBACK_CFG);
+	geni_write_reg_nolog(tx_trans_cfg, uport->membase,
+							SE_UART_TX_TRANS_CFG);
+	geni_write_reg_nolog(tx_parity_cfg, uport->membase,
+							SE_UART_TX_PARITY_CFG);
+	geni_write_reg_nolog(rx_trans_cfg, uport->membase,
+							SE_UART_RX_TRANS_CFG);
+	geni_write_reg_nolog(rx_parity_cfg, uport->membase,
+							SE_UART_RX_PARITY_CFG);
+	geni_write_reg_nolog(bits_per_char, uport->membase,
+							SE_UART_TX_WORD_LEN);
+	geni_write_reg_nolog(bits_per_char, uport->membase,
+							SE_UART_RX_WORD_LEN);
+	geni_write_reg_nolog(stop_bit_len, uport->membase,
+						SE_UART_TX_STOP_BIT_LEN);
+	geni_write_reg_nolog(rxstale, uport->membase, SE_UART_RX_STALE_CNT);
+	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_M_CLK_CFG);
+	geni_write_reg_nolog(s_clk_cfg, uport->membase, GENI_SER_S_CLK_CFG);
 }
 
 static int get_clk_div_rate(unsigned int baud, unsigned long *desired_clk_rate)
@@ -843,6 +857,7 @@
 	}
 
 	clk_div = ser_clk / *desired_clk_rate;
+	*desired_clk_rate = ser_clk;
 exit_get_clk_div_rate:
 	return clk_div;
 }
@@ -875,10 +890,14 @@
 	ser_clk_cfg |= (clk_div << CLK_DIV_SHFT);
 
 	/* parity */
-	tx_trans_cfg = geni_read_reg(uport->membase, SE_UART_TX_TRANS_CFG);
-	tx_parity_cfg = geni_read_reg(uport->membase, SE_UART_TX_PARITY_CFG);
-	rx_trans_cfg = geni_read_reg(uport->membase, SE_UART_RX_TRANS_CFG);
-	rx_parity_cfg = geni_read_reg(uport->membase, SE_UART_RX_PARITY_CFG);
+	tx_trans_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_TX_TRANS_CFG);
+	tx_parity_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_TX_PARITY_CFG);
+	rx_trans_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_RX_TRANS_CFG);
+	rx_parity_cfg = geni_read_reg_nolog(uport->membase,
+							SE_UART_RX_PARITY_CFG);
 	if (termios->c_cflag & PARENB) {
 		tx_trans_cfg |= UART_TX_PAR_EN;
 		rx_trans_cfg |= UART_RX_PAR_EN;
@@ -947,7 +966,8 @@
 	unsigned int tx_fifo_status;
 	unsigned int is_tx_empty = 1;
 
-	tx_fifo_status = geni_read_reg(port->membase, SE_GENI_TX_FIFO_STATUS);
+	tx_fifo_status = geni_read_reg_nolog(port->membase,
+						SE_GENI_TX_FIFO_STATUS);
 	if (tx_fifo_status)
 		is_tx_empty = 0;
 
@@ -1264,9 +1284,6 @@
 		goto exit_geni_serial_probe;
 	}
 
-	/* Default core clk to 115200 Baud */
-	clk_set_rate(dev_port->serial_rsc.se_clk, (115200 * UART_OVERSAMPLING));
-	uport->uartclk = clk_get_rate(dev_port->serial_rsc.se_clk);
 	dev_port->tx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
 	dev_port->rx_fifo_depth = DEF_FIFO_DEPTH_WORDS;
 	dev_port->tx_fifo_width = DEF_FIFO_WIDTH_BITS;
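
Two related changes above are worth a worked example: get_clk_div_rate()
now writes the matched source rate back through *desired_clk_rate, and the
probe no longer pins the core clock to a 115200-baud default. With the
numbers below (the 7.3728 MHz source rate is illustrative, not from the
patch), the divider arithmetic is:

#include <stdio.h>

#define UART_OVERSAMPLING	32

int main(void)
{
	unsigned int baud = 115200;
	/* Rate requested from the clock tree: baud * oversampling. */
	unsigned long desired = baud * UART_OVERSAMPLING;	/* 3686400 */
	/* Suppose the clock framework matches this to 7372800 Hz. */
	unsigned long ser_clk = 7372800;
	unsigned int clk_div = ser_clk / desired;		/* 2 */

	/* As in the patch, the caller is handed the real source rate. */
	desired = ser_clk;
	printf("div=%u, source=%lu Hz\n", clk_div, desired);
	return 0;
}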
diff --git a/include/crypto/ice.h b/include/crypto/ice.h
new file mode 100644
index 0000000..558d136
--- /dev/null
+++ b/include/crypto/ice.h
@@ -0,0 +1,79 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_INLINE_CRYPTO_ENGINE_H_
+#define _QCOM_INLINE_CRYPTO_ENGINE_H_
+
+#include <linux/platform_device.h>
+
+struct request;
+
+enum ice_cryto_algo_mode {
+	ICE_CRYPTO_ALGO_MODE_AES_ECB = 0x0,
+	ICE_CRYPTO_ALGO_MODE_AES_XTS = 0x3,
+};
+
+enum ice_crpto_key_size {
+	ICE_CRYPTO_KEY_SIZE_128 = 0x0,
+	ICE_CRYPTO_KEY_SIZE_256 = 0x2,
+};
+
+enum ice_crpto_key_mode {
+	ICE_CRYPTO_USE_KEY0_HW_KEY = 0x0,
+	ICE_CRYPTO_USE_KEY1_HW_KEY = 0x1,
+	ICE_CRYPTO_USE_LUT_SW_KEY0 = 0x2,
+	ICE_CRYPTO_USE_LUT_SW_KEY  = 0x3
+};
+
+struct ice_crypto_setting {
+	enum ice_crpto_key_size		key_size;
+	enum ice_cryto_algo_mode	algo_mode;
+	enum ice_crpto_key_mode		key_mode;
+	short				key_index;
+};
+
+struct ice_data_setting {
+	struct ice_crypto_setting	crypto_data;
+	bool				sw_forced_context_switch;
+	bool				decr_bypass;
+	bool				encr_bypass;
+};
+
+typedef void (*ice_error_cb)(void *, u32 error);
+
+struct qcom_ice_variant_ops *qcom_ice_get_variant_ops(struct device_node *node);
+struct platform_device *qcom_ice_get_pdevice(struct device_node *node);
+
+#ifdef CONFIG_CRYPTO_DEV_QCOM_ICE
+int qcom_ice_setup_ice_hw(const char *storage_type, int enable);
+#else
+static inline int qcom_ice_setup_ice_hw(const char *storage_type, int enable)
+{
+	return 0;
+}
+#endif
+
+struct qcom_ice_variant_ops {
+	const char *name;
+	int	(*init)(struct platform_device *, void *, ice_error_cb);
+	int	(*reset)(struct platform_device *);
+	int	(*resume)(struct platform_device *);
+	int	(*suspend)(struct platform_device *);
+	int	(*config_start)(struct platform_device *, struct request *,
+				struct ice_data_setting *, bool);
+	int	(*config_end)(struct request *);
+	int	(*status)(struct platform_device *);
+	void	(*debug)(struct platform_device *);
+};
+
+#endif /* _QCOM_INLINE_CRYPTO_ENGINE_H_ */
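
To show how the types in this header fit together, here is a hedged sketch
of a config_start-style decision: bypass both directions for plain I/O,
otherwise select a 256-bit XTS key from the LUT. The
ice_request_has_key()/ice_key_index() helpers are hypothetical stand-ins
for the caller's key bookkeeping:

#include <crypto/ice.h>

/* Hypothetical helpers standing in for the caller's key lookup. */
extern bool ice_request_has_key(struct request *req);
extern short ice_key_index(struct request *req);

static void fill_ice_setting(struct request *req,
			     struct ice_data_setting *setting)
{
	if (!ice_request_has_key(req)) {
		/* Plain I/O: let ICE pass data through untouched. */
		setting->decr_bypass = true;
		setting->encr_bypass = true;
		return;
	}

	setting->decr_bypass = false;
	setting->encr_bypass = false;
	setting->crypto_data.algo_mode = ICE_CRYPTO_ALGO_MODE_AES_XTS;
	setting->crypto_data.key_size = ICE_CRYPTO_KEY_SIZE_256;
	setting->crypto_data.key_mode = ICE_CRYPTO_USE_LUT_SW_KEY;
	setting->crypto_data.key_index = ice_key_index(req);
}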
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index d52e335..96461d4 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -86,116 +86,111 @@
 #define GCC_QMIP_CAMERA_AHB_CLK					68
 #define GCC_QMIP_DISP_AHB_CLK					69
 #define GCC_QMIP_VIDEO_AHB_CLK					70
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK				71
-#define GCC_QUPV3_WRAP0_CORE_2X_CLK_SRC				72
-#define GCC_QUPV3_WRAP0_CORE_CLK				73
-#define GCC_QUPV3_WRAP0_S0_CLK					74
-#define GCC_QUPV3_WRAP0_S0_CLK_SRC				75
-#define GCC_QUPV3_WRAP0_S1_CLK					76
-#define GCC_QUPV3_WRAP0_S1_CLK_SRC				77
-#define GCC_QUPV3_WRAP0_S2_CLK					78
-#define GCC_QUPV3_WRAP0_S2_CLK_SRC				79
-#define GCC_QUPV3_WRAP0_S3_CLK					80
-#define GCC_QUPV3_WRAP0_S3_CLK_SRC				81
-#define GCC_QUPV3_WRAP0_S4_CLK					82
-#define GCC_QUPV3_WRAP0_S4_CLK_SRC				83
-#define GCC_QUPV3_WRAP0_S5_CLK					84
-#define GCC_QUPV3_WRAP0_S5_CLK_SRC				85
-#define GCC_QUPV3_WRAP0_S6_CLK					86
-#define GCC_QUPV3_WRAP0_S6_CLK_SRC				87
-#define GCC_QUPV3_WRAP0_S7_CLK					88
-#define GCC_QUPV3_WRAP0_S7_CLK_SRC				89
-#define GCC_QUPV3_WRAP1_CORE_2X_CLK				90
-#define GCC_QUPV3_WRAP1_CORE_CLK				91
-#define GCC_QUPV3_WRAP1_S0_CLK					92
-#define GCC_QUPV3_WRAP1_S0_CLK_SRC				93
-#define GCC_QUPV3_WRAP1_S1_CLK					94
-#define GCC_QUPV3_WRAP1_S1_CLK_SRC				95
-#define GCC_QUPV3_WRAP1_S2_CLK					96
-#define GCC_QUPV3_WRAP1_S2_CLK_SRC				97
-#define GCC_QUPV3_WRAP1_S3_CLK					98
-#define GCC_QUPV3_WRAP1_S3_CLK_SRC				99
-#define GCC_QUPV3_WRAP1_S4_CLK					100
-#define GCC_QUPV3_WRAP1_S4_CLK_SRC				101
-#define GCC_QUPV3_WRAP1_S5_CLK					102
-#define GCC_QUPV3_WRAP1_S5_CLK_SRC				103
-#define GCC_QUPV3_WRAP1_S6_CLK					104
-#define GCC_QUPV3_WRAP1_S6_CLK_SRC				105
-#define GCC_QUPV3_WRAP1_S7_CLK					106
-#define GCC_QUPV3_WRAP1_S7_CLK_SRC				107
-#define GCC_QUPV3_WRAP_0_M_AHB_CLK				108
-#define GCC_QUPV3_WRAP_0_S_AHB_CLK				109
-#define GCC_QUPV3_WRAP_1_M_AHB_CLK				110
-#define GCC_QUPV3_WRAP_1_S_AHB_CLK				111
-#define GCC_RX1_USB2_CLKREF_CLK					112
-#define GCC_RX2_QLINK_CLKREF_CLK				113
-#define GCC_RX3_MODEM_CLKREF_CLK				114
-#define GCC_SDCC2_AHB_CLK					115
-#define GCC_SDCC2_APPS_CLK					116
-#define GCC_SDCC2_APPS_CLK_SRC					117
-#define GCC_SDCC4_AHB_CLK					118
-#define GCC_SDCC4_APPS_CLK					119
-#define GCC_SDCC4_APPS_CLK_SRC					120
-#define GCC_SYS_NOC_CPUSS_AHB_CLK				121
-#define GCC_TSIF_AHB_CLK					122
-#define GCC_TSIF_INACTIVITY_TIMERS_CLK				123
-#define GCC_TSIF_REF_CLK					124
-#define GCC_TSIF_REF_CLK_SRC					125
-#define GCC_UFS_CARD_AHB_CLK					126
-#define GCC_UFS_CARD_AXI_CLK					127
-#define GCC_UFS_CARD_AXI_CLK_SRC				128
-#define GCC_UFS_CARD_CLKREF_CLK					129
-#define GCC_UFS_CARD_ICE_CORE_CLK				130
-#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				131
-#define GCC_UFS_CARD_PHY_AUX_CLK				132
-#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				133
-#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				134
-#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				135
-#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				136
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK				137
-#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			138
-#define GCC_UFS_MEM_CLKREF_CLK					139
-#define GCC_UFS_PHY_AHB_CLK					140
-#define GCC_UFS_PHY_AXI_CLK					141
-#define GCC_UFS_PHY_AXI_CLK_SRC					142
-#define GCC_UFS_PHY_ICE_CORE_CLK				143
-#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				144
-#define GCC_UFS_PHY_PHY_AUX_CLK					145
-#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				146
-#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				147
-#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				148
-#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				149
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK				150
-#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				151
-#define GCC_USB30_PRIM_MASTER_CLK				152
-#define GCC_USB30_PRIM_MASTER_CLK_SRC				153
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK				154
-#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			155
-#define GCC_USB30_PRIM_SLEEP_CLK				156
-#define GCC_USB30_SEC_MASTER_CLK				157
-#define GCC_USB30_SEC_MASTER_CLK_SRC				158
-#define GCC_USB30_SEC_MOCK_UTMI_CLK				159
-#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				160
-#define GCC_USB30_SEC_SLEEP_CLK					161
-#define GCC_USB3_PRIM_CLKREF_CLK				162
-#define GCC_USB3_PRIM_PHY_AUX_CLK				163
-#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				164
-#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				165
-#define GCC_USB3_PRIM_PHY_PIPE_CLK				166
-#define GCC_USB3_SEC_CLKREF_CLK					167
-#define GCC_USB3_SEC_PHY_AUX_CLK				168
-#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				169
-#define GCC_USB3_SEC_PHY_COM_AUX_CLK				170
-#define GCC_USB3_SEC_PHY_PIPE_CLK				171
-#define GCC_USB_PHY_CFG_AHB2PHY_CLK				172
-#define GCC_VIDEO_AHB_CLK					173
-#define GCC_VIDEO_AXI_CLK					174
-#define GCC_VIDEO_XO_CLK					175
-#define GPLL0							176
-#define GPLL0_OUT_EVEN						177
-#define GPLL0_OUT_MAIN						178
-#define GPLL1							179
-#define GPLL1_OUT_MAIN						180
+#define GCC_QUPV3_WRAP0_S0_CLK					71
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC				72
+#define GCC_QUPV3_WRAP0_S1_CLK					73
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC				74
+#define GCC_QUPV3_WRAP0_S2_CLK					75
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC				76
+#define GCC_QUPV3_WRAP0_S3_CLK					77
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC				78
+#define GCC_QUPV3_WRAP0_S4_CLK					79
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC				80
+#define GCC_QUPV3_WRAP0_S5_CLK					81
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC				82
+#define GCC_QUPV3_WRAP0_S6_CLK					83
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC				84
+#define GCC_QUPV3_WRAP0_S7_CLK					85
+#define GCC_QUPV3_WRAP0_S7_CLK_SRC				86
+#define GCC_QUPV3_WRAP1_S0_CLK					87
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC				88
+#define GCC_QUPV3_WRAP1_S1_CLK					89
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC				90
+#define GCC_QUPV3_WRAP1_S2_CLK					91
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC				92
+#define GCC_QUPV3_WRAP1_S3_CLK					93
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC				94
+#define GCC_QUPV3_WRAP1_S4_CLK					95
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC				96
+#define GCC_QUPV3_WRAP1_S5_CLK					97
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC				98
+#define GCC_QUPV3_WRAP1_S6_CLK					99
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC				100
+#define GCC_QUPV3_WRAP1_S7_CLK					101
+#define GCC_QUPV3_WRAP1_S7_CLK_SRC				102
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK				103
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK				104
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK				105
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK				106
+#define GCC_RX1_USB2_CLKREF_CLK					107
+#define GCC_RX2_QLINK_CLKREF_CLK				108
+#define GCC_RX3_MODEM_CLKREF_CLK				109
+#define GCC_SDCC2_AHB_CLK					110
+#define GCC_SDCC2_APPS_CLK					111
+#define GCC_SDCC2_APPS_CLK_SRC					112
+#define GCC_SDCC4_AHB_CLK					113
+#define GCC_SDCC4_APPS_CLK					114
+#define GCC_SDCC4_APPS_CLK_SRC					115
+#define GCC_SYS_NOC_CPUSS_AHB_CLK				116
+#define GCC_TSIF_AHB_CLK					117
+#define GCC_TSIF_INACTIVITY_TIMERS_CLK				118
+#define GCC_TSIF_REF_CLK					119
+#define GCC_TSIF_REF_CLK_SRC					120
+#define GCC_UFS_CARD_AHB_CLK					121
+#define GCC_UFS_CARD_AXI_CLK					122
+#define GCC_UFS_CARD_AXI_CLK_SRC				123
+#define GCC_UFS_CARD_CLKREF_CLK					124
+#define GCC_UFS_CARD_ICE_CORE_CLK				125
+#define GCC_UFS_CARD_ICE_CORE_CLK_SRC				126
+#define GCC_UFS_CARD_PHY_AUX_CLK				127
+#define GCC_UFS_CARD_PHY_AUX_CLK_SRC				128
+#define GCC_UFS_CARD_RX_SYMBOL_0_CLK				129
+#define GCC_UFS_CARD_RX_SYMBOL_1_CLK				130
+#define GCC_UFS_CARD_TX_SYMBOL_0_CLK				131
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK				132
+#define GCC_UFS_CARD_UNIPRO_CORE_CLK_SRC			133
+#define GCC_UFS_MEM_CLKREF_CLK					134
+#define GCC_UFS_PHY_AHB_CLK					135
+#define GCC_UFS_PHY_AXI_CLK					136
+#define GCC_UFS_PHY_AXI_CLK_SRC					137
+#define GCC_UFS_PHY_ICE_CORE_CLK				138
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC				139
+#define GCC_UFS_PHY_PHY_AUX_CLK					140
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC				141
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK				142
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK				143
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK				144
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK				145
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC				146
+#define GCC_USB30_PRIM_MASTER_CLK				147
+#define GCC_USB30_PRIM_MASTER_CLK_SRC				148
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK				149
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC			150
+#define GCC_USB30_PRIM_SLEEP_CLK				151
+#define GCC_USB30_SEC_MASTER_CLK				152
+#define GCC_USB30_SEC_MASTER_CLK_SRC				153
+#define GCC_USB30_SEC_MOCK_UTMI_CLK				154
+#define GCC_USB30_SEC_MOCK_UTMI_CLK_SRC				155
+#define GCC_USB30_SEC_SLEEP_CLK					156
+#define GCC_USB3_PRIM_CLKREF_CLK				157
+#define GCC_USB3_PRIM_PHY_AUX_CLK				158
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC				159
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK				160
+#define GCC_USB3_PRIM_PHY_PIPE_CLK				161
+#define GCC_USB3_SEC_CLKREF_CLK					162
+#define GCC_USB3_SEC_PHY_AUX_CLK				163
+#define GCC_USB3_SEC_PHY_AUX_CLK_SRC				164
+#define GCC_USB3_SEC_PHY_COM_AUX_CLK				165
+#define GCC_USB3_SEC_PHY_PIPE_CLK				166
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK				167
+#define GCC_VIDEO_AHB_CLK					168
+#define GCC_VIDEO_AXI_CLK					169
+#define GCC_VIDEO_XO_CLK					170
+#define GPLL0							171
+#define GPLL0_OUT_EVEN						172
+#define GPLL0_OUT_MAIN						173
+#define GPLL1							174
+#define GPLL1_OUT_MAIN						175
 
 /* GCC reset clocks */
 #define GCC_GPU_BCR						0
@@ -224,4 +219,10 @@
 #define GCC_USB3_DP_PHY_SEC_BCR					23
 #define GCC_USB_PHY_CFG_AHB2PHY_BCR				24
 
+/* Dummy clocks for rate measurement */
+#define MEASURE_ONLY_SNOC_CLK					0
+#define MEASURE_ONLY_CNOC_CLK					1
+#define MEASURE_ONLY_BIMC_CLK					2
+#define MEASURE_ONLY_IPA_2X_CLK					3
+
 #endif
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 6dd1547..9403bbe 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -83,6 +83,12 @@
 	bool enhanced_strobe;			/* hs400es selection */
 };
 
+/* states to represent load on the host */
+enum mmc_load {
+	MMC_LOAD_HIGH,
+	MMC_LOAD_LOW,
+};
+
 struct mmc_host_ops {
 	/*
 	 * 'enable' is called when the host is claimed and 'disable' is called
@@ -168,6 +174,7 @@
 	 */
 	int	(*multi_io_quirk)(struct mmc_card *card,
 				  unsigned int direction, int blk_size);
+	int	(*notify_load)(struct mmc_host *, enum mmc_load);
 };
 
 struct mmc_card;
@@ -320,6 +327,8 @@
 #define MMC_CAP2_NO_SD		(1 << 21)	/* Do not send SD commands during initialization */
 #define MMC_CAP2_NO_MMC		(1 << 22)	/* Do not send (e)MMC commands during initialization */
 #define MMC_CAP2_PACKED_WR_CONTROL (1 << 23)	/* Allow write packing control */
+#define MMC_CAP2_CLK_SCALE	(1 << 24)	/* Allow dynamic clk scaling */
+#define MMC_CAP2_ASYNC_SDIO_IRQ_4BIT_MODE (1 << 25)	/* Allows Asynchronous SDIO irq while card is in 4-bit mode */
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
@@ -431,10 +440,16 @@
 	struct io_latency_state io_lat_s;
 #endif
 
+	/*
+	 * Set to true to just stop the SDCLK to the card without
+	 * actually disabling the clock from its source.
+	 */
+	bool			card_clock_off;
 	unsigned long		private[0] ____cacheline_aligned;
 };
 
 struct mmc_host *mmc_alloc_host(int extra, struct device *);
+extern bool mmc_host_may_gate_card(struct mmc_card *);
 int mmc_add_host(struct mmc_host *);
 void mmc_remove_host(struct mmc_host *);
 void mmc_free_host(struct mmc_host *);
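
The new notify_load() op pairs with MMC_CAP2_CLK_SCALE: the core reports
the card's load and the host rescales its clock. A minimal sketch of an
implementation, where my_host_set_bus_clock() and both frequencies are
hypothetical host-specific choices:

#include <linux/mmc/host.h>

/* Hypothetical host-specific clock setter. */
extern int my_host_set_bus_clock(struct mmc_host *mmc, unsigned int hz);

static int my_host_notify_load(struct mmc_host *mmc, enum mmc_load state)
{
	/* Scale the bus clock with the reported load. */
	unsigned int hz = (state == MMC_LOAD_HIGH) ? 200000000 : 50000000;

	return my_host_set_bus_clock(mmc, hz);
}

static const struct mmc_host_ops my_host_ops = {
	.notify_load	= my_host_notify_load,
};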
diff --git a/include/linux/platform_data/qcom_crypto_device.h b/include/linux/platform_data/qcom_crypto_device.h
new file mode 100644
index 0000000..eadaa42
--- /dev/null
+++ b/include/linux/platform_data/qcom_crypto_device.h
@@ -0,0 +1,24 @@
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CRYPTO_DEVICE__H
+#define __QCOM_CRYPTO_DEVICE__H
+
+struct msm_ce_hw_support {
+	uint32_t ce_shared;
+	uint32_t shared_ce_resource;
+	uint32_t hw_key_support;
+	uint32_t sha_hmac;
+	void *bus_scale_table;
+};
+
+#endif /* __QCOM_CRYPTO_DEVICE__H */
diff --git a/drivers/power/supply/qcom/pmic-voter.h b/include/linux/pmic-voter.h
similarity index 90%
rename from drivers/power/supply/qcom/pmic-voter.h
rename to include/linux/pmic-voter.h
index 031b9a0..f202bf7 100644
--- a/drivers/power/supply/qcom/pmic-voter.h
+++ b/include/linux/pmic-voter.h
@@ -24,6 +24,9 @@
 	NUM_VOTABLE_TYPES,
 };
 
+bool is_client_vote_enabled(struct votable *votable, const char *client_str);
+bool is_client_vote_enabled_locked(struct votable *votable,
+							const char *client_str);
 int get_client_vote(struct votable *votable, const char *client_str);
 int get_client_vote_locked(struct votable *votable, const char *client_str);
 int get_effective_result(struct votable *votable);
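
The two is_client_vote_enabled() variants added here let callers tell a
disabled vote apart from an enabled vote whose value happens to be 0. A
hedged usage sketch (creating or finding the votable is outside this
header, so usb_icl_votable is assumed to exist):

#include <linux/pmic-voter.h>

/* Assumed to have been created/found elsewhere in the driver. */
extern struct votable *usb_icl_votable;

static int report_usb_icl_vote(const char *client)
{
	/* A disabled vote and a 0 uA vote are different states. */
	if (!is_client_vote_enabled(usb_icl_votable, client))
		return -EINVAL;

	return get_client_vote(usb_icl_votable, client);
}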
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b46d6a8..77912a1 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -217,6 +217,7 @@
 	POWER_SUPPLY_PROP_DP_DM,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_LIMITED,
 	POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+	POWER_SUPPLY_PROP_CHARGE_QNOVO_ENABLE,
 	POWER_SUPPLY_PROP_CURRENT_QNOVO,
 	POWER_SUPPLY_PROP_VOLTAGE_QNOVO,
 	POWER_SUPPLY_PROP_RERUN_AICL,
@@ -245,6 +246,7 @@
 	POWER_SUPPLY_PROP_DIE_HEALTH,
 	POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
 	POWER_SUPPLY_PROP_CTM_CURRENT_MAX,
+	POWER_SUPPLY_PROP_HW_CURRENT_MAX,
 	/* Local extensions of type int64_t */
 	POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
 	/* Properties of type `const char *' */
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index e1ad51e..0de4da6 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -248,6 +248,17 @@
 #define RX_DMA_IRQ_DELAY_MSK	(GENMASK(8, 6))
 #define RX_DMA_IRQ_DELAY_SHFT	(6)
 
+static inline unsigned int geni_read_reg_nolog(void __iomem *base, int offset)
+{
+	return readl_relaxed_no_log(base + offset);
+}
+
+static inline void geni_write_reg_nolog(unsigned int value, void __iomem *base,
+				int offset)
+{
+	return writel_relaxed_no_log(value, (base + offset));
+}
+
 static inline unsigned int geni_read_reg(void __iomem *base, int offset)
 {
 	return readl_relaxed(base + offset);
diff --git a/include/linux/qcrypto.h b/include/linux/qcrypto.h
new file mode 100644
index 0000000..252464a
--- /dev/null
+++ b/include/linux/qcrypto.h
@@ -0,0 +1,65 @@
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+#define _DRIVERS_CRYPTO_MSM_QCRYPTO_H_
+
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+
+#define QCRYPTO_CTX_KEY_MASK		0x000000ff
+#define QCRYPTO_CTX_USE_HW_KEY		0x00000001
+#define QCRYPTO_CTX_USE_PIPE_KEY	0x00000002
+
+#define QCRYPTO_CTX_XTS_MASK		0x0000ff00
+#define QCRYPTO_CTX_XTS_DU_SIZE_512B	0x00000100
+#define QCRYPTO_CTX_XTS_DU_SIZE_1KB	0x00000200
+
+
+int qcrypto_cipher_set_device(struct ablkcipher_request *req, unsigned int dev);
+int qcrypto_ahash_set_device(struct ahash_request *req, unsigned int dev);
+/*int qcrypto_aead_set_device(struct aead_request *req, unsigned int dev);*/
+
+int qcrypto_cipher_set_flag(struct ablkcipher_request *req, unsigned int flags);
+int qcrypto_ahash_set_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_set_flag(struct aead_request *req, unsigned int flags);*/
+
+int qcrypto_cipher_clear_flag(struct ablkcipher_request *req,
+							unsigned int flags);
+int qcrypto_ahash_clear_flag(struct ahash_request *req, unsigned int flags);
+/*int qcrypto_aead_clear_flag(struct aead_request *req, unsigned int flags);*/
+
+struct crypto_engine_entry {
+	u32 hw_instance;
+	u32 ce_device;
+	int shared;
+};
+
+int qcrypto_get_num_engines(void);
+void qcrypto_get_engine_list(size_t num_engines,
+				struct crypto_engine_entry *arr);
+int qcrypto_cipher_set_device_hw(struct ablkcipher_request *req,
+				unsigned int fde_pfe,
+				unsigned int hw_inst);
+
+
+struct qcrypto_func_set {
+	int (*cipher_set)(struct ablkcipher_request *req,
+			unsigned int fde_pfe,
+			unsigned int hw_inst);
+	int (*cipher_flag)(struct ablkcipher_request *req, unsigned int flags);
+	int (*get_num_engines)(void);
+	void (*get_engine_list)(size_t num_engines,
+				struct crypto_engine_entry *arr);
+};
+
+#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTO_H_ */
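
A hedged sketch of how the helpers above compose: route a cipher request
to the hardware key with 512-byte XTS data units, then pin it to one
engine (allocating the request itself is ordinary crypto-API work, and the
engine number 0 is illustrative):

#include <linux/qcrypto.h>

static int use_hw_key_xts_512(struct ablkcipher_request *req)
{
	int ret;

	/* HW key plus 512B XTS data-unit size, as defined above. */
	ret = qcrypto_cipher_set_flag(req,
			QCRYPTO_CTX_USE_HW_KEY | QCRYPTO_CTX_XTS_DU_SIZE_512B);
	if (ret)
		return ret;

	return qcrypto_cipher_set_device(req, 0);
}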
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
new file mode 100644
index 0000000..0efea04
--- /dev/null
+++ b/include/soc/qcom/qseecomi.h
@@ -0,0 +1,729 @@
+/*
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QSEECOMI_H_
+#define __QSEECOMI_H_
+
+#include <linux/qseecom.h>
+
+#define QSEECOM_KEY_ID_SIZE   32
+
+#define QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD  -19   /*0xFFFFFFED*/
+#define QSEOS_RESULT_FAIL_UNSUPPORTED_CE_PIPE -63
+#define QSEOS_RESULT_FAIL_KS_OP               -64
+#define QSEOS_RESULT_FAIL_KEY_ID_EXISTS       -65
+#define QSEOS_RESULT_FAIL_MAX_KEYS            -66
+#define QSEOS_RESULT_FAIL_SAVE_KS             -67
+#define QSEOS_RESULT_FAIL_LOAD_KS             -68
+#define QSEOS_RESULT_FAIL_KS_ALREADY_DONE     -69
+#define QSEOS_RESULT_FAIL_KEY_ID_DNE          -70
+#define QSEOS_RESULT_FAIL_INCORRECT_PSWD      -71
+#define QSEOS_RESULT_FAIL_MAX_ATTEMPT         -72
+#define QSEOS_RESULT_FAIL_PENDING_OPERATION   -73
+
+enum qseecom_command_scm_resp_type {
+	QSEOS_APP_ID = 0xEE01,
+	QSEOS_LISTENER_ID
+};
+
+enum qseecom_qceos_cmd_id {
+	QSEOS_APP_START_COMMAND      = 0x01,
+	QSEOS_APP_SHUTDOWN_COMMAND,
+	QSEOS_APP_LOOKUP_COMMAND,
+	QSEOS_REGISTER_LISTENER,
+	QSEOS_DEREGISTER_LISTENER,
+	QSEOS_CLIENT_SEND_DATA_COMMAND,
+	QSEOS_LISTENER_DATA_RSP_COMMAND,
+	QSEOS_LOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_UNLOAD_EXTERNAL_ELF_COMMAND,
+	QSEOS_GET_APP_STATE_COMMAND,
+	QSEOS_LOAD_SERV_IMAGE_COMMAND,
+	QSEOS_UNLOAD_SERV_IMAGE_COMMAND,
+	QSEOS_APP_REGION_NOTIFICATION,
+	QSEOS_REGISTER_LOG_BUF_COMMAND,
+	QSEOS_RPMB_PROVISION_KEY_COMMAND,
+	QSEOS_RPMB_ERASE_COMMAND,
+	QSEOS_GENERATE_KEY  = 0x11,
+	QSEOS_DELETE_KEY,
+	QSEOS_MAX_KEY_COUNT,
+	QSEOS_SET_KEY,
+	QSEOS_UPDATE_KEY_USERINFO,
+	QSEOS_TEE_OPEN_SESSION,
+	QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_INVOKE_MODFD_COMMAND = QSEOS_TEE_INVOKE_COMMAND,
+	QSEOS_TEE_CLOSE_SESSION,
+	QSEOS_TEE_REQUEST_CANCELLATION,
+	QSEOS_CONTINUE_BLOCKED_REQ_COMMAND,
+	QSEOS_RPMB_CHECK_PROV_STATUS_COMMAND = 0x1B,
+	QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
+	QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
+	QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
+	QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
+	QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
+	QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
+	QSEOS_FSM_IKE_REQ_CMD = 0x203,
+	QSEOS_FSM_IKE_REQ_RSP_CMD = 0x204,
+	QSEOS_FSM_OEM_FUSE_WRITE_ROW = 0x301,
+	QSEOS_FSM_OEM_FUSE_READ_ROW = 0x302,
+	QSEOS_FSM_ENCFS_REQ_CMD = 0x403,
+	QSEOS_FSM_ENCFS_REQ_RSP_CMD = 0x404,
+
+	QSEOS_CMD_MAX     = 0xEFFFFFFF
+};
+
+enum qseecom_qceos_cmd_status {
+	QSEOS_RESULT_SUCCESS = 0,
+	QSEOS_RESULT_INCOMPLETE,
+	QSEOS_RESULT_BLOCKED_ON_LISTENER,
+	QSEOS_RESULT_FAILURE  = 0xFFFFFFFF
+};
+
+enum qseecom_pipe_type {
+	QSEOS_PIPE_ENC = 0x1,
+	QSEOS_PIPE_ENC_XTS = 0x2,
+	QSEOS_PIPE_AUTH = 0x4,
+	QSEOS_PIPE_ENUM_FILL = 0x7FFFFFFF
+};
+
+/* QSEE Reentrancy support phase */
+enum qseecom_qsee_reentrancy_phase {
+	QSEE_REENTRANCY_PHASE_0 = 0,
+	QSEE_REENTRANCY_PHASE_1,
+	QSEE_REENTRANCY_PHASE_2,
+	QSEE_REENTRANCY_PHASE_3,
+	QSEE_REENTRANCY_PHASE_MAX = 0xFF
+};
+
+__packed  struct qsee_apps_region_info_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t addr;
+	uint32_t size;
+};
+
+__packed  struct qsee_apps_region_info_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t addr;
+	uint32_t size;
+};
+
+__packed struct qseecom_check_app_ireq {
+	uint32_t qsee_cmd_id;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_load_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;		/* Length of the mdt file */
+	uint32_t img_len;		/* Length of .bxx and .mdt files */
+	uint32_t phy_addr;		/* phy addr of the start of image */
+	char     app_name[MAX_APP_NAME_SIZE];	/* application name */
+};
+
+__packed struct qseecom_load_app_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+	char     app_name[MAX_APP_NAME_SIZE];
+};
+
+__packed struct qseecom_unload_app_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  app_id;
+};
+
+__packed struct qseecom_load_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint32_t phy_addr;
+};
+
+__packed struct qseecom_load_lib_image_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t mdt_len;
+	uint32_t img_len;
+	uint64_t phy_addr;
+};
+
+__packed struct qseecom_unload_lib_image_ireq {
+	uint32_t qsee_cmd_id;
+};
+
+__packed struct qseecom_register_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t sb_ptr;
+	uint32_t sb_len;
+};
+
+__packed struct qseecom_register_listener_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint64_t sb_ptr;
+	uint32_t sb_len;
+};
+
+__packed struct qseecom_unregister_listener_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t  listener_id;
+};
+
+__packed struct qseecom_client_send_data_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;	/* First 4 bytes should be the return status */
+	uint32_t rsp_len;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_data_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+	uint64_t req_ptr;
+	uint32_t req_len;
+	uint64_t rsp_ptr;
+	uint32_t rsp_len;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
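
To make the request layout concrete, here is a hedged sketch of filling
the 64-bit send-data structure; the physical addresses come from the
caller, and the actual SCM invocation lives in the qseecom driver, not
this header:

#include <soc/qcom/qseecomi.h>

static void fill_send_data_req(struct qseecom_client_send_data_64bit_ireq *ireq,
			       uint32_t app_id,
			       uint64_t req_phys, uint32_t req_len,
			       uint64_t rsp_phys, uint32_t rsp_len)
{
	ireq->qsee_cmd_id = QSEOS_CLIENT_SEND_DATA_COMMAND;
	ireq->app_id = app_id;
	ireq->req_ptr = req_phys;	/* physical address of request */
	ireq->req_len = req_len;
	ireq->rsp_ptr = rsp_phys;	/* first 4 bytes carry the status */
	ireq->rsp_len = rsp_len;
	ireq->sglistinfo_ptr = 0;	/* no scatter-gather list here */
	ireq->sglistinfo_len = 0;
}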
+__packed struct qseecom_reg_log_buf_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t phy_addr;
+	uint32_t len;
+};
+
+__packed struct qseecom_reg_log_buf_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint64_t phy_addr;
+	uint32_t len;
+};
+
+/* send_data resp */
+__packed struct qseecom_client_listener_data_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint32_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_listener_data_64bit_irsp {
+	uint32_t qsee_cmd_id;
+	uint32_t listener_id;
+	uint32_t status;
+	uint64_t sglistinfo_ptr;
+	uint32_t sglistinfo_len;
+};
+
+/*
+ * struct qseecom_command_scm_resp - qseecom response buffer
+ * @result: status value from enum qseecom_qceos_cmd_status
+ * @resp_type: value from enum qseecom_command_scm_resp_type
+ * @data: response data returned with the command (for example,
+ *                the app ID or listener ID)
+ */
+__packed struct qseecom_command_scm_resp {
+	uint32_t result;
+	enum qseecom_command_scm_resp_type resp_type;
+	unsigned int data;
+};
+
+struct qseecom_rpmb_provision_key {
+	uint32_t key_type;
+};
+
+__packed struct qseecom_client_send_service_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type; /* in */
+	unsigned int req_len; /* in */
+	uint32_t rsp_ptr; /* in/out */
+	unsigned int rsp_len; /* in/out */
+};
+
+__packed struct qseecom_client_send_service_64bit_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t key_type;
+	unsigned int req_len;
+	uint64_t rsp_ptr;
+	unsigned int rsp_len;
+};
+
+__packed struct qseecom_key_generate_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_select_ireq {
+	uint32_t qsee_command_id;
+	uint32_t ce;
+	uint32_t pipe;
+	uint32_t pipe_type;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_delete_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_userinfo_update_ireq {
+	uint32_t qsee_command_id;
+	uint32_t flags;
+	uint8_t key_id[QSEECOM_KEY_ID_SIZE];
+	uint8_t current_hash32[QSEECOM_HASH_SIZE];
+	uint8_t new_hash32[QSEECOM_HASH_SIZE];
+};
+
+__packed struct qseecom_key_max_count_query_ireq {
+	uint32_t flags;
+};
+
+__packed struct qseecom_key_max_count_query_irsp {
+	uint32_t max_key_count;
+};
+
+__packed struct qseecom_qteec_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint32_t    req_ptr;
+	uint32_t    req_len;
+	uint32_t    resp_ptr;
+	uint32_t    resp_len;
+	uint32_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+};
+
+__packed struct qseecom_qteec_64bit_ireq {
+	uint32_t    qsee_cmd_id;
+	uint32_t    app_id;
+	uint64_t    req_ptr;
+	uint32_t    req_len;
+	uint64_t    resp_ptr;
+	uint32_t    resp_len;
+	uint64_t    sglistinfo_ptr;
+	uint32_t    sglistinfo_len;
+};
+
+__packed struct qseecom_client_send_fsm_key_req {
+	uint32_t qsee_cmd_id;
+	uint32_t req_ptr;
+	uint32_t req_len;
+	uint32_t rsp_ptr;
+	uint32_t rsp_len;
+};
+
+__packed struct qseecom_continue_blocked_request_ireq {
+	uint32_t qsee_cmd_id;
+	uint32_t app_id;
+};
+
+
+/**********      ARMV8 SMC INTERFACE TZ MACRO     *******************/
+
+#define TZ_SVC_APP_MGR                   1     /* Application management */
+#define TZ_SVC_LISTENER                  2     /* Listener service management */
+#define TZ_SVC_EXTERNAL                  3     /* External image loading */
+#define TZ_SVC_RPMB                      4     /* RPMB */
+#define TZ_SVC_KEYSTORE                  5     /* Keystore management */
+#define TZ_SVC_ES                        16    /* Enterprise Security */
+#define TZ_SVC_MDTP                      18    /* Mobile Device Theft */
+
+/*----------------------------------------------------------------------------
+ * Owning Entity IDs (defined by ARM SMC doc)
+ * ---------------------------------------------------------------------------
+ */
+#define TZ_OWNER_ARM                     0     /** ARM Architecture call ID */
+#define TZ_OWNER_CPU                     1     /** CPU service call ID */
+#define TZ_OWNER_SIP                     2     /** SIP service call ID */
+#define TZ_OWNER_OEM                     3     /** OEM service call ID */
+#define TZ_OWNER_STD                     4     /** Standard service call ID */
+
+/** Values 5-47 are reserved for future use */
+
+/** Trusted Application call IDs */
+#define TZ_OWNER_TZ_APPS                 48
+#define TZ_OWNER_TZ_APPS_RESERVED        49
+/** Trusted OS Call IDs */
+#define TZ_OWNER_QSEE_OS                 50
+#define TZ_OWNER_MOBI_OS                 51
+#define TZ_OWNER_OS_RESERVED_3           52
+#define TZ_OWNER_OS_RESERVED_4           53
+#define TZ_OWNER_OS_RESERVED_5           54
+#define TZ_OWNER_OS_RESERVED_6           55
+#define TZ_OWNER_OS_RESERVED_7           56
+#define TZ_OWNER_OS_RESERVED_8           57
+#define TZ_OWNER_OS_RESERVED_9           58
+#define TZ_OWNER_OS_RESERVED_10          59
+#define TZ_OWNER_OS_RESERVED_11          60
+#define TZ_OWNER_OS_RESERVED_12          61
+#define TZ_OWNER_OS_RESERVED_13          62
+#define TZ_OWNER_OS_RESERVED_14          63
+
+#define TZ_SVC_INFO                      6    /* Misc. information services */
+
+/** Trusted Application call groups */
+#define TZ_SVC_APP_ID_PLACEHOLDER        0    /* SVC bits will contain App ID */
+
+/** General helper macro to create a bitmask from bits low to high. */
+#define TZ_MASK_BITS(h, l)     ((0xffffffff >> (32 - ((h - l) + 1))) << l)
+
+/*
+ * Macro used to define an SMC ID based on the owner ID,
+ * service ID, and function number.
+ */
+#define TZ_SYSCALL_CREATE_SMC_ID(o, s, f) \
+	((uint32_t)((((o & 0x3f) << 24) | (s & 0xff) << 8) | (f & 0xff)))
+
+#define TZ_SYSCALL_PARAM_NARGS_MASK  TZ_MASK_BITS(3, 0)
+#define TZ_SYSCALL_PARAM_TYPE_MASK   TZ_MASK_BITS(1, 0)
+
+#define TZ_SYSCALL_CREATE_PARAM_ID(nargs, p1, p2, p3, \
+	p4, p5, p6, p7, p8, p9, p10) \
+	((nargs&TZ_SYSCALL_PARAM_NARGS_MASK)+ \
+	((p1&TZ_SYSCALL_PARAM_TYPE_MASK)<<4)+ \
+	((p2&TZ_SYSCALL_PARAM_TYPE_MASK)<<6)+ \
+	((p3&TZ_SYSCALL_PARAM_TYPE_MASK)<<8)+ \
+	((p4&TZ_SYSCALL_PARAM_TYPE_MASK)<<10)+ \
+	((p5&TZ_SYSCALL_PARAM_TYPE_MASK)<<12)+ \
+	((p6&TZ_SYSCALL_PARAM_TYPE_MASK)<<14)+ \
+	((p7&TZ_SYSCALL_PARAM_TYPE_MASK)<<16)+ \
+	((p8&TZ_SYSCALL_PARAM_TYPE_MASK)<<18)+ \
+	((p9&TZ_SYSCALL_PARAM_TYPE_MASK)<<20)+ \
+	((p10&TZ_SYSCALL_PARAM_TYPE_MASK)<<22))
+
+/*
+ * Macros used to create the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_CREATE_PARAM_ID_0 0
+#define TZ_SYSCALL_CREATE_PARAM_ID_1(p1) \
+	TZ_SYSCALL_CREATE_PARAM_ID(1, p1, 0, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_2(p1, p2) \
+	TZ_SYSCALL_CREATE_PARAM_ID(2, p1, p2, 0, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_3(p1, p2, p3) \
+	TZ_SYSCALL_CREATE_PARAM_ID(3, p1, p2, p3, 0, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_4(p1, p2, p3, p4) \
+	TZ_SYSCALL_CREATE_PARAM_ID(4, p1, p2, p3, p4, 0, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_5(p1, p2, p3, p4, p5) \
+	TZ_SYSCALL_CREATE_PARAM_ID(5, p1, p2, p3, p4, p5, 0, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_6(p1, p2, p3, p4, p5, p6) \
+	TZ_SYSCALL_CREATE_PARAM_ID(6, p1, p2, p3, p4, p5, p6, 0, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_7(p1, p2, p3, p4, p5, p6, p7) \
+	TZ_SYSCALL_CREATE_PARAM_ID(7, p1, p2, p3, p4, p5, p6, p7, 0, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_8(p1, p2, p3, p4, p5, p6, p7, p8) \
+	TZ_SYSCALL_CREATE_PARAM_ID(8, p1, p2, p3, p4, p5, p6, p7, p8, 0, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_9(p1, p2, p3, p4, p5, p6, p7, p8, p9) \
+	TZ_SYSCALL_CREATE_PARAM_ID(9, p1, p2, p3, p4, p5, p6, p7, p8, p9, 0)
+#define TZ_SYSCALL_CREATE_PARAM_ID_10(p1, p2, p3, p4, p5, p6, p7, p8, p9, p10) \
+	TZ_SYSCALL_CREATE_PARAM_ID(10, p1, p2, p3, p4, p5, p6, p7, p8, p9, p10)
+
+/*
+ * Macro used to obtain the Parameter ID associated with the syscall
+ */
+#define TZ_SYSCALL_GET_PARAM_ID(CMD_ID)        CMD_ID ## _PARAM_ID
+
+/** Helper macro to extract the owning entity from the SMC ID. */
+#define TZ_SYSCALL_OWNER_ID(r0)   ((r0 & TZ_MASK_BITS(29, 24)) >> 24)
+
+/** Helper macro for checking whether an owning entity is of type trusted OS. */
+#define IS_OWNER_TRUSTED_OS(owner_id) \
+			(((owner_id >= 50) && (owner_id <= 63)) ? 1:0)
+
+#define TZ_SYSCALL_PARAM_TYPE_VAL              0x0     /* type of value */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RO           0x1     /* type of buffer RO */
+#define TZ_SYSCALL_PARAM_TYPE_BUF_RW           0x2     /* type of buffer RW */
+
+#define TZ_OS_APP_START_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
+
+#define TZ_OS_APP_START_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
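
As a sanity check on the macro plumbing, the ID just defined expands as
follows (every value is read straight off the definitions above):

/*
 * TZ_OS_APP_START_ID
 *   = TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x01)
 *   = ((50 & 0x3f) << 24) | ((1 & 0xff) << 8) | (0x01 & 0xff)
 *   = 0x32000101
 *
 * TZ_OS_APP_START_ID_PARAM_ID packs nargs = 3 into bits [3:0] with three
 * TZ_SYSCALL_PARAM_TYPE_VAL (0x0) argument types above it, i.e. 0x3.
 * Going the other way, TZ_SYSCALL_OWNER_ID(0x32000101) masks bits
 * [29:24] and yields 0x32 = 50, so IS_OWNER_TRUSTED_OS() is true.
 */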
+#define TZ_OS_APP_SHUTDOWN_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x02)
+
+#define TZ_OS_APP_SHUTDOWN_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_LOOKUP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x03)
+
+#define TZ_OS_APP_LOOKUP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_GET_STATE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x04)
+
+#define TZ_OS_APP_GET_STATE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x05)
+
+#define TZ_OS_APP_REGION_NOTIFICATION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x06)
+
+#define TZ_OS_REGISTER_LOG_BUFFER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x07)
+
+#define TZ_OS_LOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_APP_MGR, 0x08)
+
+#define TZ_OS_UNLOAD_SERVICES_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_REGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x01)
+
+#define TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x06)
+
+#define TZ_OS_REGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x02)
+
+#define TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x03)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x01)
+
+#define TZ_OS_LOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x01)
+
+
+#define TZ_APP_QSAPP_SEND_DATA_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_EXTERNAL, 0x02)
+
+#define TZ_OS_UNLOAD_EXTERNAL_IMAGE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x01)
+
+#define TZ_INFO_IS_SVC_AVAILABLE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_INFO, 0x03)
+
+#define TZ_INFO_GET_FEATURE_VERSION_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x01)
+
+#define TZ_OS_RPMB_PROVISION_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_RPMB_ERASE_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x02)
+
+#define TZ_OS_RPMB_ERASE_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_RPMB, 0x03)
+
+#define TZ_OS_RPMB_CHECK_PROV_STATUS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_0
+
+#define TZ_OS_KS_GEN_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x01)
+
+#define TZ_OS_KS_GEN_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_DEL_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x02)
+
+#define TZ_OS_KS_DEL_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x03)
+
+#define TZ_OS_KS_GET_MAX_KEYS_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x04)
+
+#define TZ_OS_KS_SET_PIPE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_KS_UPDATE_KEY_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_KEYSTORE, 0x05)
+
+#define TZ_OS_KS_UPDATE_KEY_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_2( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_ES, 0x01)
+
+#define TZ_ES_SAVE_PARTITION_HASH_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_3( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x02)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x03)
+
+#define TZ_APP_GPAPP_CLOSE_SESSION_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID					\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x04)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_ID_PARAM_ID				\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID				\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x05)
+
+#define TZ_APP_GPAPP_REQUEST_CANCELLATION_ID_PARAM_ID			\
+	TZ_SYSCALL_CREATE_PARAM_ID_5(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_MDTP_CIPHER_DIP_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_SIP, TZ_SVC_MDTP, 0x1)
+
+#define TZ_MDTP_CIPHER_DIP_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_5( \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RO, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x04)
+
+#define TZ_OS_CONTINUE_BLOCKED_REQUEST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_1(TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS, \
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x06)
+
+#define TZ_APP_QSAPP_SEND_DATA_WITH_WHITELIST_ID_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_7( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x07)
+
+#define TZ_APP_GPAPP_OPEN_SESSION_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID			\
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_TZ_APPS,			\
+	TZ_SVC_APP_ID_PLACEHOLDER, 0x09)
+
+#define TZ_APP_GPAPP_INVOKE_COMMAND_WITH_WHITELIST_ID_PARAM_ID		\
+	TZ_SYSCALL_CREATE_PARAM_ID_7(					\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW,	\
+	TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
+	TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
+	TZ_SYSCALL_CREATE_PARAM_ID_4( \
+	TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+	TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
+#endif /* __QSEECOMI_H_ */
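
The SMC IDs and PARAM IDs above are consumed in pairs: the PARAM ID encodes
the argument layout TZ expects, and the SMC ID selects the owner, service and
function number. A minimal caller sketch, assuming the msm-4.9 scm_call2()
and struct scm_desc interface from include/soc/qcom/scm.h (the listener id
value below is illustrative, not taken from this patch):

	#include <linux/types.h>
	#include <soc/qcom/scm.h>

	static int deregister_listener_example(u32 listener_id)
	{
		struct scm_desc desc = {0};

		/* arginfo tells TZ how to interpret the args[] slots */
		desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
		desc.args[0] = listener_id;

		/* owner TZ_OWNER_QSEE_OS, service TZ_SVC_LISTENER, fn 0x02 */
		return scm_call2(TZ_OS_DEREGISTER_LISTENER_ID, &desc);
	}
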
diff --git a/include/uapi/linux/fips_status.h b/include/uapi/linux/fips_status.h
index d7cda94..7daf27b 100644
--- a/include/uapi/linux/fips_status.h
+++ b/include/uapi/linux/fips_status.h
@@ -5,24 +5,24 @@
 #include <linux/ioctl.h>
 
 /**
- * fips_status: global FIPS140-2 status
- * @FIPS140_STATUS_NA:
- *					Not a FIPS140-2 compliant Build.
- *					The flag status won't
- *					change throughout
- *					the lifetime
- * @FIPS140_STATUS_PASS_CRYPTO:
- *					KAT self tests are passed.
- * @FIPS140_STATUS_QCRYPTO_ALLOWED:
- *					Integrity test is passed.
- * @FIPS140_STATUS_PASS:
- *					All tests are passed and build
- *					is in FIPS140-2 mode
- * @FIPS140_STATUS_FAIL:
- *					One of the test is failed.
- *					This will block all requests
- *					to crypto modules
- */
+* fips_status: global FIPS140-2 status
+* @FIPS140_STATUS_NA:
+*					Not a FIPS140-2 compliant build.
+*					The flag status will not
+*					change throughout the
+*					lifetime of the build.
+* @FIPS140_STATUS_PASS_CRYPTO:
+*					KAT self-tests have passed.
+* @FIPS140_STATUS_QCRYPTO_ALLOWED:
+*					The integrity test has passed.
+* @FIPS140_STATUS_PASS:
+*					All tests have passed and the
+*					build is in FIPS140-2 mode.
+* @FIPS140_STATUS_FAIL:
+*					One of the tests has failed.
+*					This will block all requests
+*					to the crypto modules.
+*/
 enum fips_status {
 		FIPS140_STATUS_NA				= 0,
 		FIPS140_STATUS_PASS_CRYPTO		= 1,
diff --git a/include/uapi/linux/qcedev.h b/include/uapi/linux/qcedev.h
index 6fee15d..fb51c23 100644
--- a/include/uapi/linux/qcedev.h
+++ b/include/uapi/linux/qcedev.h
@@ -20,14 +20,14 @@
 #define QCEDEV_AES_KEY_192	24
 #define QCEDEV_AES_KEY_256	32
 /**
- *qcedev_oper_enum: Operation types
- * @QCEDEV_OPER_ENC:		Encrypt
- * @QCEDEV_OPER_DEC:		Decrypt
- * @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need key to be specified by
- *				user. Key already set by an external processor.
- * @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
- *				user. Key already set by an external processor.
- */
+* qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_ENC:		Encrypt
+* @QCEDEV_OPER_DEC:		Decrypt
+* @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. No key need be specified by the user;
+*				the key was already set by an external processor.
+* @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. No key need be specified by the user;
+*				the key was already set by an external processor.
+*/
 enum qcedev_oper_enum {
 	QCEDEV_OPER_DEC		= 0,
 	QCEDEV_OPER_ENC		= 1,
@@ -37,11 +37,11 @@
 };
 
 /**
- *qcedev_oper_enum: Cipher algorithm types
- * @QCEDEV_ALG_DES:		DES
- * @QCEDEV_ALG_3DES:		3DES
- * @QCEDEV_ALG_AES:		AES
- */
+* qcedev_cipher_alg_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES:		DES
+* @QCEDEV_ALG_3DES:		3DES
+* @QCEDEV_ALG_AES:		AES
+*/
 enum qcedev_cipher_alg_enum {
 	QCEDEV_ALG_DES		= 0,
 	QCEDEV_ALG_3DES		= 1,
@@ -50,15 +50,15 @@
 };
 
 /**
- *qcedev_cipher_mode_enum : AES mode
- * @QCEDEV_AES_MODE_CBC:		CBC
- * @QCEDEV_AES_MODE_ECB:		ECB
- * @QCEDEV_AES_MODE_CTR:		CTR
- * @QCEDEV_AES_MODE_XTS:		XTS
- * @QCEDEV_AES_MODE_CCM:		CCM
- * @QCEDEV_DES_MODE_CBC:		CBC
- * @QCEDEV_DES_MODE_ECB:		ECB
- */
+* qcedev_cipher_mode_enum: AES and DES cipher modes
+* @QCEDEV_AES_MODE_CBC:		CBC
+* @QCEDEV_AES_MODE_ECB:		ECB
+* @QCEDEV_AES_MODE_CTR:		CTR
+* @QCEDEV_AES_MODE_XTS:		XTS
+* @QCEDEV_AES_MODE_CCM:		CCM
+* @QCEDEV_DES_MODE_CBC:		CBC
+* @QCEDEV_DES_MODE_ECB:		ECB
+*/
 enum qcedev_cipher_mode_enum {
 	QCEDEV_AES_MODE_CBC	= 0,
 	QCEDEV_AES_MODE_ECB	= 1,
@@ -71,13 +71,13 @@
 };
 
 /**
- *enum qcedev_sha_alg_enum : Secure Hashing Algorithm
- * @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
- * @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bit)
- * @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
- * @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bit)
- * @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
- */
+* qcedev_sha_alg_enum: Secure hash algorithms
+* @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bits)
+* @QCEDEV_ALG_SHA1_HMAC:	HMAC returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC:	HMAC returned: 32 bytes (256 bits)
+* @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
+*/
 enum qcedev_sha_alg_enum {
 	QCEDEV_ALG_SHA1		= 0,
 	QCEDEV_ALG_SHA256	= 1,
@@ -88,12 +88,12 @@
 };
 
 /**
- * struct buf_info - Buffer information
- * @offset:			Offset from the base address of the buffer
- *				(Used when buffer is allocated using PMEM)
- * @vaddr:			Virtual buffer address pointer
- * @len:				Size of the buffer
- */
+* struct buf_info - Buffer information
+* @offset:			Offset from the base address of the buffer
+*				(Used when buffer is allocated using PMEM)
+* @vaddr:			Virtual buffer address pointer
+* @len:				Size of the buffer
+*/
 struct	buf_info {
 	union {
 		uint32_t	offset;
@@ -103,26 +103,26 @@
 };
 
 /**
- * struct qcedev_vbuf_info - Source and destination Buffer information
- * @src:				Array of buf_info for input/source
- * @dst:				Array of buf_info for output/destination
- */
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src:				Array of buf_info for input/source
+* @dst:				Array of buf_info for output/destination
+*/
 struct	qcedev_vbuf_info {
 	struct buf_info	src[QCEDEV_MAX_BUFFERS];
 	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
 };
 
 /**
- * struct qcedev_pmem_info - Stores PMEM buffer information
- * @fd_src:			Handle to /dev/adsp_pmem used to allocate
- *				memory for input/src buffer
- * @src:				Array of buf_info for input/source
- * @fd_dst:			Handle to /dev/adsp_pmem used to allocate
- *				memory for output/dst buffer
- * @dst:				Array of buf_info for output/destination
- * @pmem_src_offset:		The offset from input/src buffer
- *				(allocated by PMEM)
- */
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src:			Handle to /dev/adsp_pmem used to allocate
+*				memory for input/src buffer
+* @src:				Array of buf_info for input/source
+* @fd_dst:			Handle to /dev/adsp_pmem used to allocate
+*				memory for output/dst buffer
+* @dst:				Array of buf_info for output/destination
+* @pmem_src_offset:		The offset from input/src buffer
+*				(allocated by PMEM)
+*/
 struct	qcedev_pmem_info {
 	int		fd_src;
 	struct buf_info	src[QCEDEV_MAX_BUFFERS];
@@ -131,52 +131,52 @@
 };
 
 /**
- * struct qcedev_cipher_op_req - Holds the ciphering request information
- * @use_pmem (IN):	Flag to indicate if buffer source is PMEM
- *			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
- * @pmem (IN):		Stores PMEM buffer information.
- *			Refer struct qcedev_pmem_info
- * @vbuf (IN/OUT):	Stores Source and destination Buffer information
- *			Refer to struct qcedev_vbuf_info
- * @data_len (IN):	Total Length of input/src and output/dst in bytes
- * @in_place_op (IN):	Indicates whether the operation is inplace where
- *			source == destination
- *			When using PMEM allocated memory, must set this to 1
- * @enckey (IN):		128 bits of confidentiality key
- *			enckey[0] bit 127-120, enckey[1] bit 119-112,..
- *			enckey[15] bit 7-0
- * @encklen (IN):	Length of the encryption key(set to 128  bits/16
- *			bytes in the driver)
- * @iv (IN/OUT):		Initialisation vector data
- *			This is updated by the driver, incremented by
- *			number of blocks encrypted/decrypted.
- * @ivlen (IN):		Length of the IV
- * @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
- *			for AES-128 CTR mode only)
- * @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
- * @mode (IN):		Mode use when using AES algorithm: ECB/CBC/CTR
- *			Apllicabel when using AES algorithm only
- * @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
- *			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
- *
- *If use_pmem is set to 0, the driver assumes that memory was not allocated
- * via PMEM, and kernel will need to allocate memory and copy data from user
- * space buffer (data_src/dta_dst) and process accordingly and copy data back
- * to the user space buffer
- *
- * If use_pmem is set to 1, the driver assumes that memory was allocated via
- * PMEM.
- * The kernel driver will use the fd_src to determine the kernel virtual address
- * base that maps to the user space virtual address base for the  buffer
- * allocated in user space.
- * The final input/src and output/dst buffer pointer will be determined
- * by adding the offsets to the kernel virtual addr.
- *
- * If use of hardware key is supported in the target, user can configure the
- * key parameters (encklen, enckey) to use the hardware key.
- * In order to use the hardware key, set encklen to 0 and set the enckey
- * data array to 0.
- */
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+*			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN):		Stores PMEM buffer information.
+*			Refer to struct qcedev_pmem_info
+* @vbuf (IN/OUT):	Stores source and destination buffer information
+*			Refer to struct qcedev_vbuf_info
+* @data_len (IN):	Total length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is in place, i.e.
+*			source == destination
+*			When using PMEM allocated memory, this must be set to 1
+* @enckey (IN):		128 bits of confidentiality key
+*			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+*			enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+*			bytes in the driver)
+* @iv (IN/OUT):		Initialisation vector data
+*			This is updated by the driver, incremented by the
+*			number of blocks encrypted/decrypted.
+* @ivlen (IN):		Length of the IV
+* @byteoffset (IN):	Offset in the cipher block (applicable, and to be
+*			set, for AES-128 CTR mode only)
+* @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR
+*			Applicable when using the AES algorithm only
+* @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+*			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM; the kernel will allocate memory, copy the data in from the user
+* space buffers (data_src/data_dst), process it, and copy the result back
+* to the user space buffer.
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointers are determined by
+* adding the offsets to the kernel virtual address.
+*
+* If use of a hardware key is supported on the target, the user can
+* configure the key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
+*/
 struct	qcedev_cipher_op_req {
 	uint8_t				use_pmem;
 	union {
@@ -197,16 +197,16 @@
 };
 
 /**
- * struct qcedev_sha_op_req - Holds the hashing request information
- * @data (IN):			Array of pointers to the data to be hashed
- * @entries (IN):		Number of buf_info entries in the data array
- * @data_len (IN):		Length of data to be hashed
- * @digest (IN/OUT):		Returns the hashed data information
- * @diglen (OUT):		Size of the hashed/digest data
- * @authkey (IN):		Pointer to authentication key for HMAC
- * @authklen (IN):		Size of the authentication key
- * @alg (IN):			Secure Hash algorithm
- */
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN):			Array of pointers to the data to be hashed
+* @entries (IN):		Number of buf_info entries in the data array
+* @data_len (IN):		Length of data to be hashed
+* @digest (IN/OUT):		Returns the hashed data information
+* @diglen (OUT):		Size of the hashed/digest data
+* @authkey (IN):		Pointer to authentication key for HMAC
+* @authklen (IN):		Size of the authentication key
+* @alg (IN):			Secure Hash algorithm
+*/
 struct	qcedev_sha_op_req {
 	struct buf_info			data[QCEDEV_MAX_BUFFERS];
 	uint32_t			entries;
@@ -219,20 +219,16 @@
 };
 
 /**
- * struct qfips_verify_t - Holds data for FIPS Integrity test
- * @kernel_size  (IN):		Size of kernel Image
- * @kernel       (IN):		pointer to buffer containing the kernel Image
- */
+* struct qfips_verify_t - Holds data for the FIPS integrity test
+* @kernel_size  (IN):		Size of the kernel image
+* @kernel       (IN):		Pointer to a buffer containing the kernel image
+*/
 struct qfips_verify_t {
 	unsigned int kernel_size;
 	void *kernel;
 };
 
 struct file;
-/* temporiraly comment out for msm-4.9 headfile upgrade */
-/* extern long qcedev_ioctl(struct file *file,
- *                 unsigned int cmd, unsigned long arg);
- */
 
 #define QCEDEV_IOC_MAGIC	0x87
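
Taken together, the enums and structures above define the qcedev userspace
contract. A minimal userspace sketch of one AES-128-CBC encryption over the
non-PMEM (vbuf) path follows; it assumes the usual qcedev device node
(/dev/qce) and the QCEDEV_IOCTL_ENC_REQ request defined further down in this
header, and trims error handling to the essentials:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <linux/qcedev.h>

	int qce_encrypt_cbc(uint8_t *buf, uint32_t len,
			    const uint8_t key[16], const uint8_t iv[16])
	{
		struct qcedev_cipher_op_req req;
		int fd, ret;

		fd = open("/dev/qce", O_RDWR);
		if (fd < 0)
			return -1;

		memset(&req, 0, sizeof(req));
		req.use_pmem = 0;	/* vbuf path: kernel copies in/out */
		req.vbuf.src[0].vaddr = buf;
		req.vbuf.src[0].len = len;
		req.vbuf.dst[0].vaddr = buf;	/* in-place operation */
		req.vbuf.dst[0].len = len;
		req.entries = 1;
		req.data_len = len;
		req.in_place_op = 1;
		memcpy(req.enckey, key, 16);
		req.encklen = 16;	/* encklen = 0 would select the HW key */
		memcpy(req.iv, iv, 16);
		req.ivlen = 16;
		req.alg = QCEDEV_ALG_AES;
		req.mode = QCEDEV_AES_MODE_CBC;
		req.op = QCEDEV_OPER_ENC;

		ret = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
		close(fd);
		return ret;
	}

The hashing path is analogous: fill a struct qcedev_sha_op_req and issue the
corresponding SHA ioctls defined on the same magic number.
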
 
diff --git a/include/uapi/linux/qseecom.h b/include/uapi/linux/qseecom.h
index b29072e..94e9b00 100644
--- a/include/uapi/linux/qseecom.h
+++ b/include/uapi/linux/qseecom.h
@@ -277,10 +277,6 @@
 
 struct file;
 
-/* temporiraly comment out for msm-4.9 headfile upgrade */
-/* extern long qseecom_ioctl(struct file *file,
- *             unsigned int cmd, unsigned long arg);
- */
 
 #define QSEECOM_IOC_MAGIC    0x97
 
diff --git a/include/uapi/media/cam_req_mgr.h b/include/uapi/media/cam_req_mgr.h
index 3e2b24c..fea6a70 100644
--- a/include/uapi/media/cam_req_mgr.h
+++ b/include/uapi/media/cam_req_mgr.h
@@ -20,6 +20,7 @@
 #define CAM_CPAS_DEVICE_TYPE      (CAM_DEVICE_TYPE_BASE + 7)
 #define CAM_CSIPHY_DEVICE_TYPE    (CAM_DEVICE_TYPE_BASE + 8)
 #define CAM_ACTUATOR_DEVICE_TYPE  (CAM_DEVICE_TYPE_BASE + 9)
+#define CAM_CCI_DEVICE_TYPE       (CAM_DEVICE_TYPE_BASE + 10)
 
 /* cam_req_mgr hdl info */
 #define CAM_REQ_MGR_HDL_IDX_POS           8
diff --git a/sound/soc/msm/msm8996.c b/sound/soc/msm/msm8996.c
index bc5f7e5..45c5479 100644
--- a/sound/soc/msm/msm8996.c
+++ b/sound/soc/msm/msm8996.c
@@ -351,7 +351,7 @@
 static int msm8996_set_spk(struct snd_kcontrol *kcontrol,
 			      struct snd_ctl_elem_value *ucontrol)
 {
-	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+	struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
 
 	pr_debug("%s() ucontrol->value.integer.value[0] = %ld\n",
 		 __func__, ucontrol->value.integer.value[0]);
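
For context, snd_kcontrol_chip() just returns the control's private_data,
which for ASoC controls on 4.9 is typically the component rather than the
codec, so codec-level handlers must go through the snd_soc_kcontrol_codec()
accessor. Any similar handler in this machine driver follows the same
pattern; a sketch (not part of the patch):

	static int example_spk_get(struct snd_kcontrol *kcontrol,
				   struct snd_ctl_elem_value *ucontrol)
	{
		/* resolve the codec through the control's component */
		struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);

		dev_dbg(codec->dev, "%s\n", __func__);
		return 0;
	}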